repo
stringclasses
15 values
pull_number
int64
147
18.3k
instance_id
stringlengths
14
31
issue_numbers
sequencelengths
1
3
base_commit
stringlengths
40
40
patch
stringlengths
289
585k
test_patch
stringlengths
355
6.82M
problem_statement
stringlengths
25
49.4k
hints_text
stringlengths
0
58.9k
created_at
unknowndate
2014-08-08 22:09:38
2024-06-28 03:13:12
version
stringclasses
110 values
PASS_TO_PASS
sequencelengths
0
4.82k
FAIL_TO_PASS
sequencelengths
1
1.06k
language
stringclasses
4 values
image_urls
sequencelengths
0
4
website links
sequencelengths
0
4
webpack/webpack
18,069
webpack__webpack-18069
[ "17989" ]
2a063f88da046377ee4eac1e57fc2ea9966e55c8
diff --git a/lib/buildChunkGraph.js b/lib/buildChunkGraph.js --- a/lib/buildChunkGraph.js +++ b/lib/buildChunkGraph.js @@ -44,7 +44,7 @@ const { getEntryRuntime, mergeRuntime } = require("./util/runtime"); * @property {boolean | undefined} minAvailableModulesOwned true, if minAvailableModules is owned and can be modified * @property {ModuleSetPlus[]} availableModulesToBeMerged enqueued updates to the minimal set of available modules * @property {Set<Module>=} skippedItems modules that were skipped because module is already available in parent chunks (need to reconsider when minAvailableModules is shrinking) - * @property {Set<[Module, ConnectionState]>=} skippedModuleConnections referenced modules that where skipped because they were not active in this runtime + * @property {Set<[Module, ModuleGraphConnection[]]>=} skippedModuleConnections referenced modules that where skipped because they were not active in this runtime * @property {ModuleSetPlus | undefined} resultingAvailableModules set of modules available including modules from this chunk group * @property {Set<ChunkGroupInfo> | undefined} children set of children chunk groups, that will be revisited when availableModules shrink * @property {Set<ChunkGroupInfo> | undefined} availableSources set of chunk groups that are the source for minAvailableModules @@ -73,6 +73,25 @@ const bySetSize = (a, b) => { return b.size + b.plus.size - a.size - a.plus.size; }; +/** + * @param {ModuleGraphConnection[]} connections list of connections + * @param {RuntimeSpec} runtime for which runtime + * @returns {ConnectionState} connection state + */ +const getActiveStateOfConnections = (connections, runtime) => { + let merged = connections[0].getActiveState(runtime); + if (merged === true) return true; + for (let i = 1; i < connections.length; i++) { + const c = connections[i]; + merged = ModuleGraphConnection.addConnectionStates( + merged, + c.getActiveState(runtime) + ); + if (merged === true) return true; + } + return merged; 
+}; + const extractBlockModules = (module, moduleGraph, runtime, blockModulesMap) => { let blockCache; let modules; @@ -99,9 +118,6 @@ const extractBlockModules = (module, moduleGraph, runtime, blockModulesMap) => { if (!m) continue; // We skip weak connections if (connection.weak) continue; - const state = connection.getActiveState(runtime); - // We skip inactive connections - if (state === false) continue; const block = moduleGraph.getParentBlock(d); let index = moduleGraph.getParentBlockIndex(d); @@ -115,41 +131,47 @@ const extractBlockModules = (module, moduleGraph, runtime, blockModulesMap) => { modules = blockModulesMap.get((blockCache = block)); } - const i = index << 2; + const i = index * 3; modules[i] = m; - modules[i + 1] = state; + modules[i + 1] = connection.getActiveState(runtime); + modules[i + 2] = connection; } for (const modules of arrays) { if (modules.length === 0) continue; let indexMap; let length = 0; - outer: for (let j = 0; j < modules.length; j += 2) { + outer: for (let j = 0; j < modules.length; j += 3) { const m = modules[j]; if (m === undefined) continue; const state = modules[j + 1]; + const connection = modules[j + 2]; if (indexMap === undefined) { let i = 0; - for (; i < length; i += 2) { + for (; i < length; i += 3) { if (modules[i] === m) { const merged = modules[i + 1]; + modules[i + 2].push(connection); if (merged === true) continue outer; modules[i + 1] = ModuleGraphConnection.addConnectionStates( merged, state ); + continue outer; } } modules[length] = m; length++; modules[length] = state; length++; + modules[length] = [connection]; + length++; if (length > 30) { // To avoid worse case performance, we will use an index map for // linear cost access, which allows to maintain O(n) complexity // while keeping allocations down to a minimum indexMap = new Map(); - for (let i = 0; i < length; i += 2) { + for (let i = 0; i < length; i += 3) { indexMap.set(modules[i], i + 1); } } @@ -157,6 +179,7 @@ const extractBlockModules = (module, 
moduleGraph, runtime, blockModulesMap) => { const idx = indexMap.get(m); if (idx !== undefined) { const merged = modules[idx]; + modules[idx + 1].push(connection); if (merged === true) continue outer; modules[idx] = ModuleGraphConnection.addConnectionStates( merged, @@ -168,6 +191,8 @@ const extractBlockModules = (module, moduleGraph, runtime, blockModulesMap) => { modules[length] = state; indexMap.set(m, length); length++; + modules[length] = [connection]; + length++; } } } @@ -207,7 +232,7 @@ const visitModules = ( * * @param {DependenciesBlock} block block * @param {RuntimeSpec} runtime runtime - * @returns {(Module | ConnectionState)[]} block modules in flatten tuples + * @returns {(Module | ConnectionState | ModuleGraphConnection[])[]} block modules in flatten tuples */ const getBlockModules = (block, runtime) => { if (blockModulesMapRuntime !== runtime) { @@ -382,7 +407,7 @@ const visitModules = ( /** @type {QueueItem[]} */ let queueDelayed = []; - /** @type {[Module, ConnectionState][]} */ + /** @type {[Module, ModuleGraphConnection[]][]} */ const skipConnectionBuffer = []; /** @type {Module[]} */ const skipBuffer = []; @@ -582,7 +607,7 @@ const visitModules = ( const { minAvailableModules } = chunkGroupInfo; // Buffer items because order need to be reversed to get indices correct // Traverse all referenced modules - for (let i = 0; i < blockModules.length; i += 2) { + for (let i = 0; i < blockModules.length; i += 3) { const refModule = /** @type {Module} */ (blockModules[i]); if (chunkGraph.isModuleInChunk(refModule, chunk)) { // skip early if already connected @@ -592,7 +617,11 @@ const visitModules = ( blockModules[i + 1] ); if (activeState !== true) { - skipConnectionBuffer.push([refModule, activeState]); + const connections = /** @type {ModuleGraphConnection[]} */ ( + blockModules[i + 2] + ); + skipConnectionBuffer.push([refModule, connections]); + // We skip inactive connections if (activeState === false) continue; } if ( @@ -666,7 +695,7 @@ const 
visitModules = ( if (blockModules !== undefined) { // Traverse all referenced modules - for (let i = 0; i < blockModules.length; i += 2) { + for (let i = 0; i < blockModules.length; i += 3) { const refModule = /** @type {Module} */ (blockModules[i]); const activeState = /** @type {ConnectionState} */ ( blockModules[i + 1] @@ -1172,7 +1201,11 @@ const visitModules = ( /** @type {ModuleSetPlus} */ (info.minAvailableModules); for (const entry of info.skippedModuleConnections) { - const [module, activeState] = entry; + const [module, connections] = entry; + const activeState = getActiveStateOfConnections( + connections, + info.runtime + ); if (activeState === false) continue; if (activeState === true) { info.skippedModuleConnections.delete(entry); @@ -1286,7 +1319,7 @@ const visitModules = ( return; } - for (let i = 0; i < blockModules.length; i += 2) { + for (let i = 0; i < blockModules.length; i += 3) { const refModule = /** @type {Module} */ (blockModules[i]); const activeState = /** @type {ConnectionState} */ ( blockModules[i + 1]
diff --git a/test/__snapshots__/StatsTestCases.basictest.js.snap b/test/__snapshots__/StatsTestCases.basictest.js.snap --- a/test/__snapshots__/StatsTestCases.basictest.js.snap +++ b/test/__snapshots__/StatsTestCases.basictest.js.snap @@ -3355,8 +3355,8 @@ cacheable modules 807 bytes ./first.js 236 bytes [built] [code generated] ./second.js 202 bytes [built] [code generated] ./vendor.js 25 bytes [built] [code generated] - ./common2.js 25 bytes [built] [code generated] ./module_first.js 31 bytes [built] [code generated] + ./common2.js 25 bytes [built] [code generated] ./lazy_first.js 91 bytes [built] [code generated] ./lazy_shared.js 56 bytes [built] [code generated] ./lazy_second.js 91 bytes [built] [code generated] @@ -3382,8 +3382,8 @@ cacheable modules 975 bytes ModuleConcatenation bailout: Cannot concat with ./common_lazy_shared.js: Module ./common_lazy_shared.js is referenced from different chunks by these modules: ./lazy_shared.js ./common_lazy_shared.js 25 bytes [built] [code generated] orphan modules 118 bytes [orphan] - ./common2.js 25 bytes [orphan] [built] ./module_first.js 31 bytes [orphan] [built] + ./common2.js 25 bytes [orphan] [built] ./common.js 37 bytes [orphan] [built] ModuleConcatenation bailout: Module is not in any chunk ./common_lazy.js 25 bytes [orphan] [built] @@ -3457,10 +3457,10 @@ cacheable modules 1.22 KiB | [no exports] | [no exports used] | Statement (ExpressionStatement) with side effects in source code at 4:0-30 - | ./node_modules/big-module/a.js 58 bytes [built] - | [only some exports used: a] | ./node_modules/module-with-export/index.js 1.01 KiB [built] | [only some exports used: smallVar] + | ./node_modules/big-module/a.js 58 bytes [built] + | [only some exports used: a] ./node_modules/module-with-export/emptyModule.js 43 bytes [built] [code generated] [used exports unknown] ModuleConcatenation bailout: Module is not an ECMAScript module diff --git a/test/configCases/chunk-graph/issue-17989/entry-a.js 
b/test/configCases/chunk-graph/issue-17989/entry-a.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-graph/issue-17989/entry-a.js @@ -0,0 +1,10 @@ +import loadModule from "./shared" + +it("should not have a.add from entry-a + entry-b", () => { + return loadModule().then(module => { + const { arg } = module; + expect(arg).toBe(42) + expect(typeof __webpack_modules__["./util2.js"]).toBe("function") + expect(require.cache["./util2.js"]).toBe(undefined); // not loaded on __webpack_require__.c["./util2.js"] + }); +}); diff --git a/test/configCases/chunk-graph/issue-17989/entry-b.js b/test/configCases/chunk-graph/issue-17989/entry-b.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-graph/issue-17989/entry-b.js @@ -0,0 +1,17 @@ +it("should have util2.js in util chunk", () => { + return import("./shared") + .then(({ default: loadModule }) => loadModule()) + .then((module) => { + let arg = module.arg; + expect(arg).toBe(42) + expect(typeof __webpack_modules__["./util2.js"]).toBe("function") + expect(typeof require.cache["./util2.js"]).toBe("object"); // loaded on __webpack_require__.c["./util2.js"] + return arg + }) + .then(arg => { + return import("./util1").then(module => { + let res = module.f(arg); + expect(res).toBe(84); + }) + }) +}); diff --git a/test/configCases/chunk-graph/issue-17989/shared.js b/test/configCases/chunk-graph/issue-17989/shared.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-graph/issue-17989/shared.js @@ -0,0 +1 @@ +export default () => import("./util") diff --git a/test/configCases/chunk-graph/issue-17989/test.config.js b/test/configCases/chunk-graph/issue-17989/test.config.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-graph/issue-17989/test.config.js @@ -0,0 +1,5 @@ +module.exports = { + findBundle: function (i, options) { + return ["a.js", "b.js"]; + } +}; diff --git a/test/configCases/chunk-graph/issue-17989/util.js b/test/configCases/chunk-graph/issue-17989/util.js new 
file mode 100644 --- /dev/null +++ b/test/configCases/chunk-graph/issue-17989/util.js @@ -0,0 +1 @@ +export { arg } from './util1' diff --git a/test/configCases/chunk-graph/issue-17989/util1.js b/test/configCases/chunk-graph/issue-17989/util1.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-graph/issue-17989/util1.js @@ -0,0 +1,2 @@ +export const arg = 42 +export { f } from "./util2" diff --git a/test/configCases/chunk-graph/issue-17989/util2.js b/test/configCases/chunk-graph/issue-17989/util2.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-graph/issue-17989/util2.js @@ -0,0 +1 @@ +export const f = a => a * 2 diff --git a/test/configCases/chunk-graph/issue-17989/webpack.config.js b/test/configCases/chunk-graph/issue-17989/webpack.config.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-graph/issue-17989/webpack.config.js @@ -0,0 +1,17 @@ +/** @type {import("../../../../").Configuration} */ +module.exports = { + entry: { + a: "./entry-a", + b: "./entry-b" + }, + optimization: { + sideEffects: true, + providedExports: true, + usedExports: true, + concatenateModules: false, + moduleIds: "named" + }, + output: { + filename: "[name].js" + } +};
New case for "__webpack_modules__[moduleId] is not a function" error with Module Federation <!-- Please don't delete this template because we'll close your issue --> <!-- Before creating an issue please make sure you are using the latest version of webpack. --> # Bug report <!-- Please ask questions on StackOverflow or the GitHub Discussions. --> <!-- https://github.com/webpack/webpack/discussions --> <!-- https://stackoverflow.com/questions/ask?tags=webpack --> <!-- Issues which contain questions or support requests will be closed. --> **What is the current behavior?** With a certain dependency graph, webpack module federation applications crash with error: ``` __webpack_modules__[moduleId] is not a function or Cannot read properties of undefined (reading 'call') ``` This is indeed a known bug and had been fixed, but it happens again with module federation https://github.com/webpack/webpack/issues/11770 Issue describe: * Folder structure: ``` /src β”œβ”€β”€ /components β”‚ β”œβ”€β”€ Button.js β”œβ”€β”€ /helpers β”‚ β”œβ”€β”€ index.js β”œβ”€β”€ /utils β”‚ β”œβ”€β”€ index.js β”œβ”€β”€ App.js ``` * Dependency graph: ![example remote app](https://github.com/webpack/webpack/assets/41647635/955ec8c3-7751-485e-a17e-f3ca847beb50) * Bundle result: ![chunking result](https://github.com/webpack/webpack/assets/41647635/a49bd137-2474-48ff-a5cc-a23ae184b5de) * What is the problem: `./src/utils/index.jsx` module is declared in `src_helpers_index_js` but required in `src_comopnent_Button_js`, so in runtime, it cannot find the module Like I mentioned, this is a known issue and @sokra had fixed it in https://github.com/webpack/webpack/issues/11770. 
But he didn't handle for module federation case, because in this case, two federated modules `./RemoteApp` and `./helpers` are using the same runtime id **If the current behavior is a bug, please provide the steps to reproduce.** https://github.com/tqnghia1998/webpack-chunking-error-demo (latest webpack version, just yarn install, then yarn start) <!-- A great way to do this is to provide your configuration via a GitHub repository --> <!-- The most helpful is a minimal reproduction with instructions on how to reproduce --> <!-- Repositories with too many files or large `webpack.config.js` files are not suitable --> <!-- Please only add small code snippets directly into this issue --> <!-- https://gist.github.com is a good place for longer code snippets --> <!-- If your issue is caused by a plugin or loader, please create an issue on the loader/plugin repository instead --> **What is the expected behavior?** No runtime errors, `./src/utils/index.jsx` should be initialized when loading RemoteApp <!-- "It should work" is not a helpful explanation --> <!-- Explain exactly how it should behave --> **Other relevant information:** Webpack version: 5.89.0 Node.js version: v18.12.1 Operating System: Mac 14.2.1 Additional tools: -
PR welcome
"2024-02-09T05:07:26Z"
5.90
[]
[ "test/StatsTestCases.basictest.js", "test/ConfigTestCases.basictest.js" ]
JavaScript
[]
[]
webpack/webpack
18,095
webpack__webpack-18095
[ "18091" ]
eaa685e1310e2abc882adc269e9fd6e264bf3f06
diff --git a/lib/dependencies/HarmonyImportSpecifierDependency.js b/lib/dependencies/HarmonyImportSpecifierDependency.js --- a/lib/dependencies/HarmonyImportSpecifierDependency.js +++ b/lib/dependencies/HarmonyImportSpecifierDependency.js @@ -143,7 +143,8 @@ class HarmonyImportSpecifierDependency extends HarmonyImportDependency { */ getReferencedExports(moduleGraph, runtime) { let ids = this.getIds(moduleGraph); - if (ids.length === 0) return this._getReferencedExportsInDestructuring(); + if (ids.length === 0) + return this._getReferencedExportsInDestructuring(moduleGraph); let namespaceObjectAsContext = this.namespaceObjectAsContext; if (ids[0] === "default") { const selfModule = moduleGraph.getParentModule(this); @@ -160,7 +161,7 @@ class HarmonyImportSpecifierDependency extends HarmonyImportDependency { case "default-only": case "default-with-named": if (ids.length === 1) - return this._getReferencedExportsInDestructuring(); + return this._getReferencedExportsInDestructuring(moduleGraph); ids = ids.slice(1); namespaceObjectAsContext = true; break; @@ -178,21 +179,30 @@ class HarmonyImportSpecifierDependency extends HarmonyImportDependency { ids = ids.slice(0, -1); } - return this._getReferencedExportsInDestructuring(ids); + return this._getReferencedExportsInDestructuring(moduleGraph, ids); } /** + * @param {ModuleGraph} moduleGraph module graph * @param {string[]=} ids ids * @returns {(string[] | ReferencedExport)[]} referenced exports */ - _getReferencedExportsInDestructuring(ids) { + _getReferencedExportsInDestructuring(moduleGraph, ids) { if (this.referencedPropertiesInDestructuring) { /** @type {ReferencedExport[]} */ const refs = []; + const importedModule = moduleGraph.getModule(this); + const canMangle = + Array.isArray(ids) && + ids.length > 0 && + !moduleGraph + .getExportsInfo(importedModule) + .getExportInfo(ids[0]) + .isReexport(); for (const key of this.referencedPropertiesInDestructuring) { refs.push({ name: ids ? 
ids.concat([key]) : [key], - canMangle: Array.isArray(ids) && ids.length > 0 + canMangle }); } return refs;
diff --git a/test/configCases/mangle/mangle-with-destructuring-assignment/index.js b/test/configCases/mangle/mangle-with-destructuring-assignment/index.js --- a/test/configCases/mangle/mangle-with-destructuring-assignment/index.js +++ b/test/configCases/mangle/mangle-with-destructuring-assignment/index.js @@ -1,4 +1,5 @@ import * as module from "./module"; +import { obj3, obj3CanMangle, obj4, obj4CanMangle } from "./reexport?side-effects" // enable side effects to ensure reexport is not skipped it("should not mangle export when destructuring module", () => { const { obj: { a, b }, objCanMangle } = module @@ -19,3 +20,18 @@ it("should mangle export when using module dot property", () => { expect(module.aaa).toBe("aaa"); expect(module.aaaCanMangle).toBe(true) }); + +it("should not mangle export when destructuring module's property is a module", () => { + const { aaa, bbb } = obj3; + expect(aaa).toBe("a"); + expect(bbb).toBe("b"); + expect(obj3CanMangle).toBe(false) +}); + +it("should not mangle export when destructuring module's nested property is a module", () => { + const { nested: { obj5, obj5CanMangle } } = obj4; + expect(obj5.aaa).toBe("a"); + expect(obj5.bbb).toBe("b"); + expect(obj4CanMangle).toBe(true); + expect(obj5CanMangle).toBe(false) +}); diff --git a/test/configCases/mangle/mangle-with-destructuring-assignment/module2.js b/test/configCases/mangle/mangle-with-destructuring-assignment/module2.js new file mode 100644 --- /dev/null +++ b/test/configCases/mangle/mangle-with-destructuring-assignment/module2.js @@ -0,0 +1,2 @@ +export const aaa = "a"; +export const bbb = "b"; diff --git a/test/configCases/mangle/mangle-with-destructuring-assignment/module3.js b/test/configCases/mangle/mangle-with-destructuring-assignment/module3.js new file mode 100644 --- /dev/null +++ b/test/configCases/mangle/mangle-with-destructuring-assignment/module3.js @@ -0,0 +1,2 @@ +export const aaa = "a"; +export const bbb = "b"; diff --git 
a/test/configCases/mangle/mangle-with-destructuring-assignment/reexport.js b/test/configCases/mangle/mangle-with-destructuring-assignment/reexport.js new file mode 100644 --- /dev/null +++ b/test/configCases/mangle/mangle-with-destructuring-assignment/reexport.js @@ -0,0 +1,6 @@ +export * as obj3 from "./module2" +export const obj3CanMangle = __webpack_exports_info__.obj3.canMangle; + +import * as reexport2 from "./reexport2?side-effects" +export const obj4 = { nested: reexport2 } +export const obj4CanMangle = __webpack_exports_info__.reexport2.canMangle; diff --git a/test/configCases/mangle/mangle-with-destructuring-assignment/reexport2.js b/test/configCases/mangle/mangle-with-destructuring-assignment/reexport2.js new file mode 100644 --- /dev/null +++ b/test/configCases/mangle/mangle-with-destructuring-assignment/reexport2.js @@ -0,0 +1,2 @@ +export * as obj5 from "./module3" +export const obj5CanMangle = __webpack_exports_info__.obj5.canMangle; diff --git a/test/configCases/mangle/mangle-with-destructuring-assignment/webpack.config.js b/test/configCases/mangle/mangle-with-destructuring-assignment/webpack.config.js --- a/test/configCases/mangle/mangle-with-destructuring-assignment/webpack.config.js +++ b/test/configCases/mangle/mangle-with-destructuring-assignment/webpack.config.js @@ -1,5 +1,13 @@ /** @type {import("../../../../").Configuration} */ module.exports = { + module: { + rules: [ + { + resourceQuery: /side-effects/, + sideEffects: true + } + ] + }, optimization: { mangleExports: true, usedExports: true,
Destructured values from an imported namespace are mangled and returning undefined <!-- Please don't delete this template because we'll close your issue --> <!-- Before creating an issue please make sure you are using the latest version of webpack. --> # Bug report There seems to be a strange issue with name mangling change that was introduced in 5.90.2. This caused our build to fail because something that was bundled correctly in 5.90.1 was no longer bundled correctly. I've narrowed it down to [this change](https://github.com/webpack/webpack/commit/2a063f88da046377ee4eac1e57fc2ea9966e55c8#diff-96335405b47eef62b17ba6fe88f6b35a1e37c9cf98fd762fe74599aca1c65d94R195), but I haven't really been successful in figuring out what is going on, no doubt because I'm pretty new to spelunking through webpack. I've created a [repo here](https://github.com/imccausl/webpack-5.90.2-bug-repro-repo) to reproduce the behaviour, although there is one minor difference between this repo and the way our webpack config is set up at my company: in the linked repo, I can only reproduce the behaviour if I set `optimization.sideEffects` to `false`. It's my understanding that the default for `sideEffects` is `true` and as far as I can tell, `sideEffects` was not set to false when this issue occurred so I might be missing something. Either way, I thought I'd submit this here in case others with more experience with the internals of webpack have any ideas. **What is the current behavior?** Please see [the repo I set up](https://github.com/imccausl/webpack-5.90.2-bug-repro-repo) for steps to reproduce the issue. What effectively happens is that I import a namespaced module, and then I destructure a few items from this module after importing. Webpack mangles the names after the import, so that the subsequent destructured values are undefined. **What is the expected behavior?** The expected behaviour is that these values are not undefined and can be accessed correctly. 
**Other relevant information:** webpack version: 5.90.2 Node.js version: 20.11.1 Operating System: MacOS/Linux Additional tools:
"2024-02-18T09:24:23Z"
5.90
[]
[ "test/ConfigTestCases.basictest.js" ]
JavaScript
[]
[]
webpack/webpack
18,194
webpack__webpack-18194
[ "18111" ]
a48c34b34d2d6c44f9b2b221d7baf278d34ac0be
diff --git a/lib/buildChunkGraph.js b/lib/buildChunkGraph.js --- a/lib/buildChunkGraph.js +++ b/lib/buildChunkGraph.js @@ -1134,12 +1134,6 @@ const visitModules = ( * @param {BlocksWithNestedBlocks} visited visited dependencies blocks */ const process = (current, visited) => { - if (visited.has(current)) { - return; - } - - visited.add(current); - const blockModules = getBlockModules(current, runtime); if (blockModules === undefined) { return; @@ -1153,6 +1147,11 @@ const visitModules = ( continue; } const refModule = /** @type {Module} */ (blockModules[i]); + if (visited.has(refModule)) { + continue; + } + + visited.add(refModule); if (refModule) { chunkGroup.setModulePreOrderIndex(refModule, preOrderIndex++);
diff --git a/test/configCases/chunk-index/recalc-index/a.css b/test/configCases/chunk-index/recalc-index/a.css new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-index/recalc-index/a.css @@ -0,0 +1,3 @@ +.a { + color: red; +} diff --git a/test/configCases/chunk-index/recalc-index/b.css b/test/configCases/chunk-index/recalc-index/b.css new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-index/recalc-index/b.css @@ -0,0 +1,5 @@ +@import './a.css'; + +.b { + color: blue; +} diff --git a/test/configCases/chunk-index/recalc-index/dynamic.js b/test/configCases/chunk-index/recalc-index/dynamic.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-index/recalc-index/dynamic.js @@ -0,0 +1,2 @@ +import './b.css' +import './a.css' diff --git a/test/configCases/chunk-index/recalc-index/index.js b/test/configCases/chunk-index/recalc-index/index.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-index/recalc-index/index.js @@ -0,0 +1,3 @@ +it('should compile', async () => { + await import(/* webpackChunkName: 'dynamic' */ './dynamic') +}) diff --git a/test/configCases/chunk-index/recalc-index/webpack.config.js b/test/configCases/chunk-index/recalc-index/webpack.config.js new file mode 100644 --- /dev/null +++ b/test/configCases/chunk-index/recalc-index/webpack.config.js @@ -0,0 +1,55 @@ +/** @typedef {import("../../../../types").Compilation} Compilation */ +/** @typedef {import("../../../../types").Module} Module */ +/** @type {import("../../../../types").Configuration} */ +module.exports = { + entry: { + main: "./index.js" + }, + experiments: { + css: true + }, + plugins: [ + function () { + /** + * @param {Compilation} compilation compilation + * @returns {void} + */ + const handler = compilation => { + compilation.hooks.afterSeal.tap("testcase", () => { + const data = {}; + for (const [name, group] of compilation.namedChunkGroups) { + /** @type {Map<Module, number>} */ + const modules = new Map(); + for (const chunk of 
group.chunks) { + for (const module of compilation.chunkGraph.getChunkModulesIterable( + chunk + )) { + const postOrder = group.getModulePostOrderIndex(module); + if (typeof postOrder === "number") { + modules.set(module, postOrder); + } + } + } + const sortedModules = Array.from(modules).sort((a, b) => { + return a[1] - b[1]; + }); + const text = sortedModules + .map( + ([m, index]) => + `${index}: ${m.readableIdentifier( + compilation.requestShortener + )}` + ) + .join(", "); + data[name + "Index"] = text; + } + expect(data).toEqual({ + dynamicIndex: "0: css ./a.css, 1: css ./b.css, 2: ./dynamic.js", + mainIndex: "0: ./index.js" + }); + }); + }; + this.hooks.compilation.tap("testcase", handler); + } + ] +};
@import CSS at-rule not always respected when determining order <!-- Please don't delete this template because we'll close your issue --> <!-- Before creating an issue please make sure you are using the latest version of webpack. --> # Bug report <!-- Please ask questions on StackOverflow or the GitHub Discussions. --> <!-- https://github.com/webpack/webpack/discussions --> <!-- https://stackoverflow.com/questions/ask?tags=webpack --> <!-- Issues which contain questions or support requests will be closed. --> **What is the current behavior?** Despite `a.css` being imported by `b.css`, it comes later in the resulting css file. **If the current behavior is a bug, please provide the steps to reproduce.** https://github.com/Hypnosphi/webpack-css-order ``` npm install npm run build ``` <!-- A great way to do this is to provide your configuration via a GitHub repository --> <!-- The most helpful is a minimal reproduction with instructions on how to reproduce --> <!-- Repositories with too many files or large `webpack.config.js` files are not suitable --> <!-- Please only add small code snippets directly into this issue --> <!-- https://gist.github.com is a good place for longer code snippets --> <!-- If your issue is caused by a plugin or loader, please create an issue on the loader/plugin repository instead --> **What is the expected behavior?** `a.css` should go before `b.css` because it's imported. The [spec](https://drafts.csswg.org/css-cascade-5/#at-import) explicitly states that the imported rules should be treated as if they were written in place of the `@import` rule. It works OK in 5.90.0. The issue is probably caused by https://github.com/webpack/webpack/pull/18009 <!-- "It should work" is not a helpful explanation --> <!-- Explain exactly how it should behave --> **Other relevant information:** webpack version: 5.90.3 Node.js version: 18.18.2 Operating System: macOS
Hi there , I would like to work on this issue , I can't access this page though https://github.com/Hypnosphi/webpack-css-order , getting 404 Sorry, it should be public now /cc @JSerFeng friendly ping /cc @ahabhgk How do rspack solve this problem? @Hypnosphi Anyway I recommend to avoid rely on CSS order when you have multiple `import ... from ...` and `@import`, there are a lot of limitations I believe the same problem exists in Rspack. I will look into this
"2024-03-14T07:15:37Z"
5.90
[]
[ "test/ConfigTestCases.basictest.js" ]
JavaScript
[]
[]
webpack/webpack
18,319
webpack__webpack-18319
[ "18278" ]
e97af9b5317bc0e7fdbc035b98a577edfe258b83
diff --git a/lib/DefinePlugin.js b/lib/DefinePlugin.js --- a/lib/DefinePlugin.js +++ b/lib/DefinePlugin.js @@ -27,6 +27,7 @@ const createHash = require("./util/createHash"); /** @typedef {import("./NormalModule")} NormalModule */ /** @typedef {import("./RuntimeTemplate")} RuntimeTemplate */ /** @typedef {import("./javascript/JavascriptParser")} JavascriptParser */ +/** @typedef {import("./javascript/JavascriptParser").DestructuringAssignmentProperty} DestructuringAssignmentProperty */ /** @typedef {import("./javascript/JavascriptParser").Range} Range */ /** @typedef {import("./logging/Logger").Logger} Logger */ @@ -114,6 +115,15 @@ class RuntimeValue { } } +/** + * @param {Set<DestructuringAssignmentProperty> | undefined} properties properties + * @returns {Set<string> | undefined} used keys + */ +function getObjKeys(properties) { + if (!properties) return undefined; + return new Set([...properties].map(p => p.id)); +} + /** * @param {any[]|{[k: string]: any}} obj obj * @param {JavascriptParser} parser Parser @@ -491,7 +501,7 @@ class DefinePlugin { runtimeTemplate, logger, !parser.isAsiPosition(/** @type {Range} */ (expr.range)[0]), - parser.destructuringAssignmentPropertiesFor(expr) + null ); if (parser.scope.inShorthand) { @@ -597,7 +607,7 @@ class DefinePlugin { runtimeTemplate, logger, !parser.isAsiPosition(/** @type {Range} */ (expr.range)[0]), - parser.destructuringAssignmentPropertiesFor(expr) + getObjKeys(parser.destructuringAssignmentPropertiesFor(expr)) ); if (parser.scope.inShorthand) { diff --git a/lib/dependencies/HarmonyImportDependencyParserPlugin.js b/lib/dependencies/HarmonyImportDependencyParserPlugin.js --- a/lib/dependencies/HarmonyImportDependencyParserPlugin.js +++ b/lib/dependencies/HarmonyImportDependencyParserPlugin.js @@ -29,6 +29,7 @@ const HarmonyImportSpecifierDependency = require("./HarmonyImportSpecifierDepend /** @typedef {import("../Dependency").DependencyLocation} DependencyLocation */ /** @typedef 
{import("../javascript/BasicEvaluatedExpression")} BasicEvaluatedExpression */ /** @typedef {import("../javascript/JavascriptParser")} JavascriptParser */ +/** @typedef {import("../javascript/JavascriptParser").DestructuringAssignmentProperty} DestructuringAssignmentProperty */ /** @typedef {import("../javascript/JavascriptParser").Range} Range */ /** @typedef {import("../optimize/InnerGraph").InnerGraph} InnerGraph */ /** @typedef {import("../optimize/InnerGraph").TopLevelSymbol} TopLevelSymbol */ diff --git a/lib/dependencies/HarmonyImportSpecifierDependency.js b/lib/dependencies/HarmonyImportSpecifierDependency.js --- a/lib/dependencies/HarmonyImportSpecifierDependency.js +++ b/lib/dependencies/HarmonyImportSpecifierDependency.js @@ -6,6 +6,7 @@ "use strict"; const Dependency = require("../Dependency"); +const Template = require("../Template"); const { getDependencyUsedByExportsCondition } = require("../optimize/InnerGraph"); @@ -28,6 +29,7 @@ const HarmonyImportDependency = require("./HarmonyImportDependency"); /** @typedef {import("../ModuleGraphConnection").ConnectionState} ConnectionState */ /** @typedef {import("../WebpackError")} WebpackError */ /** @typedef {import("../javascript/JavascriptParser").Attributes} Attributes */ +/** @typedef {import("../javascript/JavascriptParser").DestructuringAssignmentProperty} DestructuringAssignmentProperty */ /** @typedef {import("../javascript/JavascriptParser").Range} Range */ /** @typedef {import("../serialization/ObjectMiddleware").ObjectDeserializerContext} ObjectDeserializerContext */ /** @typedef {import("../serialization/ObjectMiddleware").ObjectSerializerContext} ObjectSerializerContext */ @@ -46,8 +48,8 @@ class HarmonyImportSpecifierDependency extends HarmonyImportDependency { * @param {string} name name * @param {Range} range range * @param {TODO} exportPresenceMode export presence mode - * @param {Attributes=} attributes assertions - * @param {Range[]=} idRanges ranges for members of ids; the two arrays 
are right-aligned + * @param {Attributes | undefined} attributes assertions + * @param {Range[] | undefined} idRanges ranges for members of ids; the two arrays are right-aligned */ constructor( request, @@ -73,7 +75,7 @@ class HarmonyImportSpecifierDependency extends HarmonyImportDependency { this.asiSafe = undefined; /** @type {Set<string> | boolean | undefined} */ this.usedByExports = undefined; - /** @type {Set<string> | undefined} */ + /** @type {Set<DestructuringAssignmentProperty> | undefined} */ this.referencedPropertiesInDestructuring = undefined; } @@ -144,8 +146,7 @@ class HarmonyImportSpecifierDependency extends HarmonyImportDependency { */ getReferencedExports(moduleGraph, runtime) { let ids = this.getIds(moduleGraph); - if (ids.length === 0) - return this._getReferencedExportsInDestructuring(moduleGraph); + if (ids.length === 0) return this._getReferencedExportsInDestructuring(); let namespaceObjectAsContext = this.namespaceObjectAsContext; if (ids[0] === "default") { const selfModule = moduleGraph.getParentModule(this); @@ -162,7 +163,7 @@ class HarmonyImportSpecifierDependency extends HarmonyImportDependency { case "default-only": case "default-with-named": if (ids.length === 1) - return this._getReferencedExportsInDestructuring(moduleGraph); + return this._getReferencedExportsInDestructuring(); ids = ids.slice(1); namespaceObjectAsContext = true; break; @@ -180,31 +181,19 @@ class HarmonyImportSpecifierDependency extends HarmonyImportDependency { ids = ids.slice(0, -1); } - return this._getReferencedExportsInDestructuring(moduleGraph, ids); + return this._getReferencedExportsInDestructuring(ids); } /** - * @param {ModuleGraph} moduleGraph module graph * @param {string[]=} ids ids - * @returns {(string[] | ReferencedExport)[]} referenced exports + * @returns {string[][]} referenced exports */ - _getReferencedExportsInDestructuring(moduleGraph, ids) { + _getReferencedExportsInDestructuring(ids) { if (this.referencedPropertiesInDestructuring) { - /** 
@type {ReferencedExport[]} */ + /** @type {string[][]} */ const refs = []; - const importedModule = moduleGraph.getModule(this); - const canMangle = - Array.isArray(ids) && - ids.length > 0 && - !moduleGraph - .getExportsInfo(importedModule) - .getExportInfo(ids[0]) - .isReexport(); - for (const key of this.referencedPropertiesInDestructuring) { - refs.push({ - name: ids ? ids.concat([key]) : [key], - canMangle - }); + for (const { id } of this.referencedPropertiesInDestructuring) { + refs.push(ids ? ids.concat([id]) : [id]); } return refs; } else { @@ -336,16 +325,11 @@ HarmonyImportSpecifierDependency.Template = class HarmonyImportSpecifierDependen // Skip rendering depending when dependency is conditional if (connection && !connection.isTargetActive(runtime)) return; + const ids = dep.getIds(moduleGraph); const { trimmedRange: [trimmedRangeStart, trimmedRangeEnd], trimmedIds - } = getTrimmedIdsAndRange( - dep.getIds(moduleGraph), - dep.range, - dep.idRanges, - moduleGraph, - dep - ); + } = getTrimmedIdsAndRange(ids, dep.range, dep.idRanges, moduleGraph, dep); const exportExpr = this._getCodeForIds( dep, @@ -358,6 +342,33 @@ HarmonyImportSpecifierDependency.Template = class HarmonyImportSpecifierDependen } else { source.replace(trimmedRangeStart, trimmedRangeEnd - 1, exportExpr); } + + if (dep.referencedPropertiesInDestructuring) { + for (let { + id, + shorthand, + range + } of dep.referencedPropertiesInDestructuring) { + const concatedIds = ids.concat([id]); + if (concatedIds[0] === "default") concatedIds.shift(); + const module = moduleGraph.getModule(dep); + const used = moduleGraph + .getExportsInfo(module) + .getUsedName(concatedIds, runtime); + if (!used) return; + const newName = used[used.length - 1]; + const name = concatedIds[concatedIds.length - 1]; + if (newName === name) continue; + + const comment = Template.toNormalComment(name) + " "; + const key = comment + JSON.stringify(newName); + source.replace( + range[0], + range[1] - 1, + shorthand ? 
`${key}: ${name}` : `${key}` + ); + } + } } /** diff --git a/lib/dependencies/ImportMetaPlugin.js b/lib/dependencies/ImportMetaPlugin.js --- a/lib/dependencies/ImportMetaPlugin.js +++ b/lib/dependencies/ImportMetaPlugin.js @@ -131,7 +131,7 @@ class ImportMetaPlugin { } let str = ""; - for (const prop of referencedPropertiesInDestructuring) { + for (const { id: prop } of referencedPropertiesInDestructuring) { switch (prop) { case "url": str += `url: ${importMetaUrl()},`; diff --git a/lib/dependencies/ImportParserPlugin.js b/lib/dependencies/ImportParserPlugin.js --- a/lib/dependencies/ImportParserPlugin.js +++ b/lib/dependencies/ImportParserPlugin.js @@ -256,7 +256,9 @@ class ImportParserPlugin { ) ); } - exports = exportsFromEnumerable(referencedPropertiesInDestructuring); + exports = exportsFromEnumerable( + [...referencedPropertiesInDestructuring].map(({ id }) => id) + ); } if (param.isString()) { diff --git a/lib/javascript/JavascriptParser.js b/lib/javascript/JavascriptParser.js --- a/lib/javascript/JavascriptParser.js +++ b/lib/javascript/JavascriptParser.js @@ -147,6 +147,13 @@ class VariableInfo { /** @typedef {[number, number]} Range */ +/** + * @typedef {Object} DestructuringAssignmentProperty + * @property {string} id + * @property {Range | undefined=} range + * @property {boolean | string} shorthand + */ + /** * Helper function for joining two ranges into a single range. 
This is useful * when working with AST nodes, as it allows you to combine the ranges of child nodes @@ -437,7 +444,7 @@ class JavascriptParser extends Parser { this.statementPath = undefined; /** @type {Statement | ModuleDeclaration | Expression | undefined} */ this.prevStatement = undefined; - /** @type {WeakMap<Expression, Set<string>> | undefined} */ + /** @type {WeakMap<Expression, Set<DestructuringAssignmentProperty>> | undefined} */ this.destructuringAssignmentProperties = undefined; this.currentTagData = undefined; this.magicCommentContext = vm.createContext(undefined, { @@ -1701,7 +1708,7 @@ class JavascriptParser extends Parser { /** * @param {Expression} node node - * @returns {Set<string>|undefined} destructured identifiers + * @returns {Set<DestructuringAssignmentProperty> | undefined} destructured identifiers */ destructuringAssignmentPropertiesFor(node) { if (!this.destructuringAssignmentProperties) return undefined; @@ -2332,7 +2339,7 @@ class JavascriptParser extends Parser { // check multiple assignments if (this.destructuringAssignmentProperties.has(expression)) { const set = - /** @type {Set<string>} */ + /** @type {Set<DestructuringAssignmentProperty>} */ (this.destructuringAssignmentProperties.get(expression)); this.destructuringAssignmentProperties.delete(expression); for (const id of set) keys.add(id); @@ -2627,30 +2634,43 @@ class JavascriptParser extends Parser { /** * @param {ObjectPattern} objectPattern object pattern - * @returns {Set<string> | undefined} set of names or undefined if not all keys are identifiers + * @returns {Set<DestructuringAssignmentProperty> | undefined} set of names or undefined if not all keys are identifiers */ _preWalkObjectPattern(objectPattern) { - const ids = new Set(); + /** @type {Set<DestructuringAssignmentProperty>} */ + const props = new Set(); const properties = objectPattern.properties; for (let i = 0; i < properties.length; i++) { const property = properties[i]; if (property.type !== "Property") 
return; + if (property.shorthand && property.value.type === "Identifier") { + this.scope.inShorthand = property.value.name; + } const key = property.key; if (key.type === "Identifier") { - ids.add(key.name); + props.add({ + id: key.name, + range: key.range, + shorthand: this.scope.inShorthand + }); } else { const id = this.evaluateExpression(/** @type {TODO} */ (key)); const str = id.asString(); if (str) { - ids.add(str); + props.add({ + id: str, + range: key.range, + shorthand: this.scope.inShorthand + }); } else { // could not evaluate key return; } } + this.scope.inShorthand = false; } - return ids; + return props; } /** diff --git a/types.d.ts b/types.d.ts --- a/types.d.ts +++ b/types.d.ts @@ -3301,6 +3301,17 @@ declare abstract class DependencyTemplates { getHash(): string; clone(): DependencyTemplates; } + +/** + * Helper function for joining two ranges into a single range. This is useful + * when working with AST nodes, as it allows you to combine the ranges of child nodes + * to create the range of the _parent node_. + */ +declare interface DestructuringAssignmentProperty { + id: string; + range?: [number, number]; + shorthand: string | boolean; +} declare class DeterministicChunkIdsPlugin { constructor(options?: DeterministicChunkIdsPluginOptions); options: DeterministicChunkIdsPluginOptions; @@ -5946,12 +5957,15 @@ declare class JavascriptParser extends Parser { | ExportNamedDeclaration | ExportDefaultDeclaration | ExportAllDeclaration; - destructuringAssignmentProperties?: WeakMap<Expression, Set<string>>; + destructuringAssignmentProperties?: WeakMap< + Expression, + Set<DestructuringAssignmentProperty> + >; currentTagData: any; magicCommentContext: Context; destructuringAssignmentPropertiesFor( node: Expression - ): undefined | Set<string>; + ): undefined | Set<DestructuringAssignmentProperty>; getRenameIdentifier( expr: Expression ): undefined | string | VariableInfoInterface;
diff --git a/test/configCases/mangle/exports-info-can-mangle/index.js b/test/configCases/mangle/exports-info-can-mangle/index.js --- a/test/configCases/mangle/exports-info-can-mangle/index.js +++ b/test/configCases/mangle/exports-info-can-mangle/index.js @@ -8,7 +8,7 @@ it("__webpack_exports_info__.xxx.canMangle should be correct", () => { const { bbb, bbbCanMangle } = b; expect(bbb).toBe("bbb"); - expect(bbbCanMangle).toBe(false); + expect(bbbCanMangle).toBe(true); expect(caCanMangle).toBe(true); expect(cbCanMangle).toBe(true); diff --git a/test/configCases/mangle/mangle-with-destructuring-assignment/data.json b/test/configCases/mangle/mangle-with-destructuring-assignment/data.json new file mode 100644 --- /dev/null +++ b/test/configCases/mangle/mangle-with-destructuring-assignment/data.json @@ -0,0 +1,19 @@ +{ + "obj": { + "arr": [ + { + "prop1": 1, + "prop2": 2 + }, + { + "prop3": 3, + "prop4": 4 + }, + { + "prop5": 5, + "prop6": 6 + } + ] + }, + "foo": "foo" +} diff --git a/test/configCases/mangle/mangle-with-destructuring-assignment/index.js b/test/configCases/mangle/mangle-with-destructuring-assignment/index.js --- a/test/configCases/mangle/mangle-with-destructuring-assignment/index.js +++ b/test/configCases/mangle/mangle-with-destructuring-assignment/index.js @@ -1,11 +1,14 @@ import * as module from "./module"; import { obj3, obj3CanMangle, obj4, obj4CanMangle } from "./reexport?side-effects" // enable side effects to ensure reexport is not skipped +import data from "./data.json"; +import data2 from "./data.json?2"; +import path from "path"; -it("should not mangle export when destructuring module", () => { +it("should mangle export when destructuring module", () => { const { obj: { a, b }, objCanMangle } = module expect(a).toBe("a"); expect(b).toBe("b"); - expect(objCanMangle).toBe(false) + expect(objCanMangle).toBe(true) }); it("should mangle export when destructuring module's property", () => { @@ -21,17 +24,65 @@ it("should mangle export when using 
module dot property", () => { expect(module.aaaCanMangle).toBe(true) }); -it("should not mangle export when destructuring module's property is a module", () => { +it("should mangle export when destructuring module's property is a module", () => { const { aaa, bbb } = obj3; expect(aaa).toBe("a"); expect(bbb).toBe("b"); - expect(obj3CanMangle).toBe(false) + expect(obj3CanMangle).toBe(true) }); -it("should not mangle export when destructuring module's nested property is a module", () => { +it("should not mangle export when destructuring module's nested property is a module (used in unknown way)", () => { const { nested: { obj5, obj5CanMangle } } = obj4; expect(obj5.aaa).toBe("a"); expect(obj5.bbb).toBe("b"); expect(obj4CanMangle).toBe(true); - expect(obj5CanMangle).toBe(false) + expect(obj5CanMangle).toBe(false); // obj5 is used in unknown way +}); + +it("should mangle when destructuring json", async () => { + const { obj: { + "arr": [ + { prop1: p1 = 0 } + ] + } } = data; + expect(p1).toBe(1); + + const values = []; + ({ + foo: values[0], + obj: { + ["a" + "r" + "r"]: { + length: values[1], + } + } + } = data); + expect(values[0]).toBe("foo"); + expect(values[1]).toBe(3); + + const generatedJson = __non_webpack_require__(path.resolve(__dirname, "data.json.js")); + expect(generatedJson).toEqual({ + "W": { + "arr": [ + { "prop1": 1, "prop2": 2 }, + { "prop3": 3, "prop4": 4 }, + { "prop5": 5, "prop6": 6 } + ] + }, + "p": "foo" + }); +}); + +it("should mangle when destructuring json 2", async () => { + const { prop1, prop2 } = data2.obj.arr[0]; + expect(prop1).toBe(1); + expect(prop2).toBe(2); + + const generatedJson = __non_webpack_require__(path.resolve(__dirname, "data.json_2.js")); + expect(generatedJson).toEqual({ + "W": { + "Q": [ + { "X": 1, "Q": 2 }, + ], + } + }); }); diff --git a/test/configCases/mangle/mangle-with-destructuring-assignment/webpack.config.js b/test/configCases/mangle/mangle-with-destructuring-assignment/webpack.config.js --- 
a/test/configCases/mangle/mangle-with-destructuring-assignment/webpack.config.js +++ b/test/configCases/mangle/mangle-with-destructuring-assignment/webpack.config.js @@ -1,3 +1,5 @@ +// const { getRuntimeKey } = require("../../../../lib/util/runtime"); + /** @type {import("../../../../").Configuration} */ module.exports = { module: { @@ -11,6 +13,37 @@ module.exports = { optimization: { mangleExports: true, usedExports: true, - providedExports: true - } + providedExports: true, + concatenateModules: false + }, + plugins: [ + function getJsonCodeGeneratedSource(compiler) { + compiler.hooks.compilation.tap( + getJsonCodeGeneratedSource.name, + compilation => { + compilation.hooks.processAssets.tap( + getJsonCodeGeneratedSource.name, + () => { + for (const module of compilation.modules) { + if (module.type === "json") { + const { sources } = compilation.codeGenerationResults.get( + module, + "main" + ); + const source = sources.get("javascript"); + const file = compilation.getAssetPath("[name].js", { + filename: + module + .readableIdentifier(compilation.requestShortener) + .replace(/[?#]/g, "_") + ".js" + }); + compilation.emitAsset(file, source); + } + } + } + ); + } + ); + } + ] };
mangled exports breaks with destructuring assignment of JSON imports <!-- Please don't delete this template because we'll close your issue --> <!-- Before creating an issue please make sure you are using the latest version of webpack. --> # Bug report <!-- Please ask questions on StackOverflow or the GitHub Discussions. --> <!-- https://github.com/webpack/webpack/discussions --> <!-- https://stackoverflow.com/questions/ask?tags=webpack --> <!-- Issues which contain questions or support requests will be closed. --> **What is the current behavior?** As of 5.90.2 (including 5.91.0), destructuring assignment on a JSON import does not work because the property names have been changed (using default settings). Setting `optimization.mangleExports` to `false` fixes this, but it worked fine in 5.90.1 and it works fine accessing properties with dot notation in 5.90.2+. Essentially the following: ``` import data from './data.json'; // this works ... console.log(data.foo.bar) // ...but this does not const { bar } = data.foo; ``` **If the current behavior is a bug, please provide the steps to reproduce.** https://github.com/ikydd-bbc/spike-webpack-minify-bug <!-- A great way to do this is to provide your configuration via a GitHub repository --> <!-- The most helpful is a minimal reproduction with instructions on how to reproduce --> <!-- Repositories with too many files or large `webpack.config.js` files are not suitable --> <!-- Please only add small code snippets directly into this issue --> <!-- https://gist.github.com is a good place for longer code snippets --> <!-- If your issue is caused by a plugin or loader, please create an issue on the loader/plugin repository instead --> **What is the expected behavior?** Destructuring of JSON imports should work as they did previously in 5.90.1. We should be able to import a JSON file using the default export and then use destructuring assignment to pick out whatever properties we choose. 
<!-- "It should work" is not a helpful explanation --> <!-- Explain exactly how it should behave --> **Other relevant information:** webpack version: 5.90.2 + Node.js version: 18.19.0 Operating System: OSX Additional tools:
Thank you for the issue, do you want to send a pr? > Thank you for the issue, do you want to send a pr? Unfortunately I have no experience working with webpack internals or the details of project goals, so while I might be curious and have a dig to see what's what I couldn't commit to trying to fix this. /cc @ahabhgk What do you think? I don't like named export with JSON at all, because it volates a spec - there is no named export with JSON files, only `default` (should we output a warning on it? with `futureDefaults: true`), maybe we should disable mangle at all for JSON modules? I think there is already have a warning for named export with JSON https://github.com/webpack/webpack/blob/7090328ba33f8887db2dcf41767ba72f7bfb90bd/lib/json/JsonParser.js#L62-L63 What about disable named export with JSON in next major and when `futureDefaults: true`? I will take a look and try to fix this bug in the next few days @ikydd-bbc Just intresting, why you used named export for JSON? Legacy code? @alexander-akait The use case shown is not really a named export from JSON though, is it? It uses a default import and then destructures the already imported default. > ``` > import data from './data.json'; > > // this works ... > console.log(data.foo.bar) > > // ...but this does not > const { bar } = data.foo; > ``` @alopix Oh, I see, my mistake, sometimes the eyes are wrong :smile: We will look at this soon
"2024-04-12T18:45:18Z"
5.91
[]
[ "test/ConfigTestCases.basictest.js" ]
JavaScript
[]
[]
jestjs/jest
10,981
jestjs__jest-10981
[ "10577", "10881" ]
c2f152d9da6298a97a85233b2e754a1cbe7e0a61
diff --git a/packages/jest-runner/src/index.ts b/packages/jest-runner/src/index.ts --- a/packages/jest-runner/src/index.ts +++ b/packages/jest-runner/src/index.ts @@ -166,7 +166,12 @@ export default class TestRunner { const worker = new Worker(TEST_WORKER_PATH, { exposedMethods: ['worker'], - forkOptions: {stdio: 'pipe'}, + forkOptions: { + // use advanced serialization in order to transfer objects with circular references + // @ts-expect-error: option does not exist on the node 10 types + serialization: 'advanced', + stdio: 'pipe', + }, maxRetries: 3, numWorkers: this._globalConfig.maxWorkers, setupArgs: [
diff --git a/e2e/__tests__/__snapshots__/circularInequality.test.ts.snap b/e2e/__tests__/__snapshots__/circularInequality.test.ts.snap new file mode 100644 --- /dev/null +++ b/e2e/__tests__/__snapshots__/circularInequality.test.ts.snap @@ -0,0 +1,53 @@ +// Jest Snapshot v1, https://goo.gl/fbAQLP + +exports[`handles circular inequality properly 1`] = ` +FAIL __tests__/test-1.js + ● test + + expect(received).toEqual(expected) // deep equality + + - Expected - 1 + + Received + 3 + + - Object {} + + Object { + + "ref": [Circular], + + } + + 3 | foo.ref = foo; + 4 | + > 5 | expect(foo).toEqual({}); + | ^ + 6 | }); + + at Object.toEqual (__tests__/test-1.js:5:15) + +FAIL __tests__/test-2.js + ● test + + expect(received).toEqual(expected) // deep equality + + - Expected - 1 + + Received + 3 + + - Object {} + + Object { + + "ref": [Circular], + + } + + 3 | foo.ref = foo; + 4 | + > 5 | expect(foo).toEqual({}); + | ^ + 6 | }); + + at Object.toEqual (__tests__/test-2.js:5:15) +`; + +exports[`handles circular inequality properly 2`] = ` +Test Suites: 2 failed, 2 total +Tests: 2 failed, 2 total +Snapshots: 0 total +Time: <<REPLACED>> +Ran all test suites. +`; diff --git a/e2e/__tests__/circularInequality.test.ts b/e2e/__tests__/circularInequality.test.ts new file mode 100644 --- /dev/null +++ b/e2e/__tests__/circularInequality.test.ts @@ -0,0 +1,58 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +import {tmpdir} from 'os'; +import * as path from 'path'; +import {wrap} from 'jest-snapshot-serializer-raw'; +import { + cleanup, + createEmptyPackage, + extractSortedSummary, + writeFiles, +} from '../Utils'; +import {runContinuous} from '../runJest'; + +const tempDir = path.resolve(tmpdir(), 'circular-inequality-test'); + +beforeEach(() => { + createEmptyPackage(tempDir); +}); + +afterEach(() => { + cleanup(tempDir); +}); + +test('handles circular inequality properly', async () => { + const testFileContent = ` + it('test', () => { + const foo = {}; + foo.ref = foo; + + expect(foo).toEqual({}); + }); + `; + + writeFiles(tempDir, { + '__tests__/test-1.js': testFileContent, + '__tests__/test-2.js': testFileContent, + }); + + const {end, waitUntil} = runContinuous( + tempDir, + ['--no-watchman', '--watch-all'], + // timeout in case the `waitUntil` below doesn't fire + {stripAnsi: true, timeout: 5000}, + ); + + await waitUntil(({stderr}) => stderr.includes('Ran all test suites.')); + + const {stderr} = await end(); + + const {summary, rest} = extractSortedSummary(stderr); + expect(wrap(rest)).toMatchSnapshot(); + expect(wrap(summary)).toMatchSnapshot(); +});
Circular references hang jest when assertions fail on node 14 <!-- Love Jest? Please consider supporting our collective: πŸ‘‰ https://opencollective.com/jest/donate --> ## πŸ› Bug Report When an assertion fails where either the expected or actual value is circular, and both values are objects, jest encounters an error stating it failed to convert a circular structure to JSON, resulting in the test run not completing. ## To Reproduce ```ts it("test", () => { const foo = {}; foo.ref = foo; expect(foo).toEqual({}); }); ``` Running jest gives me the following error: ``` (node:11685) UnhandledPromiseRejectionWarning: TypeError: Converting circular structure to JSON --> starting at object with constructor 'Object' --- property 'ref' closes the circle at stringify (<anonymous>) at writeChannelMessage (internal/child_process/serialization.js:117:20) at process.target._send (internal/child_process.js:804:17) at process.target.send (internal/child_process.js:702:19) at reportSuccess (/Users/verit/basic-jsx/node_modules/jest-worker/build/workers/processChild.js:67:11) ``` Jest continues running indefinitely (I only tested up to ten minutes) and reports nothing regarding the test suite. I traced this to the [added `failureDetails` property on error messages](https://github.com/facebook/jest/pull/9496/files#diff-0755289e4b7e144aa0ef6f59f5bcf748R436), landed in [26.3.0](https://github.com/facebook/jest/releases/tag/v26.3.0). ## Expected behavior I'd expect the test to fail and jest to complete running. ## envinfo I only tested two versions. The above error occurs on 14.9.0, but does not on 12.16.1. ``` System: OS: macOS 10.15.6 CPU: (8) x64 Intel(R) Core(TM) i7-7920HQ CPU @ 3.10GHz Binaries: Node: 14.9.0 - ~/.nodenv/versions/14.9.0/bin/node npm: 6.14.8 - ~/.nodenv/versions/14.9.0/bin/npm npmPackages: jest: ^26.4.2 => 26.4.2 ``` fix(jest-worker): Remove circular references from messages sent to workers Fixes #10577 <!-- Thanks for submitting a pull request! 
Please provide enough information so that others can review your pull request. The two fields below are mandatory. --> <!-- Please remember to update CHANGELOG.md at the root of the project if you have not done so. --> ## Summary This fix prevents objects with circular references to be sent to worker processes. Node uses `JSON.stringify` to serialize such messages and this causes an error in case there are circular references in the messages. Please note I added new dependency, `fclone`, though I'm not sure it's allowed or if there already is another such function in the repository. ## Test plan This fix allows to run the tests described in the issue linked. The repro setup can be found at https://repl.it/@Frantiekiaik/jest-playground-1.
Is there any workaround? @Lonli-Lokli Looks like the only workaround for now is `--detectOpenHandles ` However, this causes a massive decrease in performance. I hope this can be fixed soon. I have migrated several of our projects to jest... and now it causes hiccups all throughout our build system when one test breaks. It would be nice if `--testTimeout` worked in this scenario... but it still allows the test to just hang until Jenkins or circle ci times out. I can reproduce this when I have two such tests (in separate files - but not sure if this matters) and I have to run it with `--watch`. Here, however, it can be reproduced even without `--watch`: https://repl.it/@Frantiekiaik/jest-playground-1 I just ran into what I assume is the same issue on Node 10.16.0 and Jest 26.6.2. I can't reproduce it as written, but if I make a file containing two copies of @voces's example test and run it in watch mode, I get the same error. The test: ```js it('test', () => { const foo = {}; foo.ref = foo; expect(foo).toEqual({}); }); it('test 2', () => { const foo = {}; foo.ref = foo; expect(foo).toEqual({}); }); ``` The error: ``` (node:58951) ExperimentalWarning: The fs.promises API is experimental (node:58951) UnhandledPromiseRejectionWarning: TypeError: Converting circular structure to JSON at JSON.stringify (<anonymous>) at process.target._send (internal/child_process.js:735:23) at process.target.send (internal/child_process.js:634:19) at reportSuccess (/Users/richardmunroe/analytics_ui/node_modules/jest-worker/build/workers/processChild.js:67:11) (node:58951) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). (rejection id: 1) (node:58951) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. 
In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code. ``` Running with `--detectOpenHandles` makes the problem go away for me too. This is happening since #9496 Commit 918636142791a3dd4ddfe9367149a90437bd6da9 And it seems to happen because of some inter-process serialization. After adding `failureDetails` property, the serialization fails on cyclic references as it contains the objects being under test. Maybe some sanitization of the `failureDetails` property would help. Note that after commit 5f6f2ec8e17555b695d65ab68824926c77730216 which changes default runner to circus, the error message is ``` "messageParent" can only be used inside a worker at messageParent (packages/jest-worker/build/workers/messageParent.js:46:11) ``` with `JEST_JASMINE=1` the error is as before. Added a PR as an attempt to fix this. Also, please note that the `messageParent` error mentioned above is due to [this line](https://github.com/facebook/jest/blob/245a58278ce189019e4af3537b0f987bf9ff210e/packages/jest-worker/src/workers/messageParent.ts#L33) which swallows the real error message which is also about circular references. > Added a PR as an attempt to fix this. πŸŽ‰ > Also, please note that the `messageParent` error mentioned above is due to [this line](https://github.com/facebook/jest/blob/245a58278ce189019e4af3537b0f987bf9ff210e/packages/jest-worker/src/workers/messageParent.ts#L33) which swallows the real error message which is also about circular references. We should not swallow errors like that... We confirm it happens in Node 12, and it’s more common when using Angular Dependency Injection (I think they have cyclic structures in some error-states). The process hangs in such scenario, but this can be improved slightly be applying `--unhandled-rejection=strict` to nodejs script, instead running jest as separate binary. It helps `jest` to recover and fail suite (but it does not resolve cyclic reference of course). 
I can confirm this problem as very common with Angular DI with Node 12 as @piotrl mentioned. Using Jest 26.6.3 How can I run it with `--unhandled-rejection=strict` flag? ```json //package.json > scripts "test:unit": "node --unhandled-rejections=strict $(npm bin)/jest --env=jest-environment-jsdom-sixteen ", ``` this is actually stopping the execution, is that the right workaround? We'd want to restore the original object on the receiving side, no? Should we use something like https://github.com/storybookjs/telejson? @SimenB sounds reasonable. I'll try to rework. Would the ['advanced'](https://nodejs.org/api/child_process.html#child_process_advanced_serialization) serialization format be an alternative for use-cases that require circular references to avoid that all workers must pay for the overhead of `fclone`? This would also solve the issue where e.g. a child returns a structure containing circular references. I would suggest to not enable this option by default because it comes with a slight performance overhead. @MichaReiser I tried that, but it just changes the error to the same as worker threads ``` (node:24057) UnhandledPromiseRejectionWarning: Error: () => (0, _jestMatcherUtils.matcherHint)( matcherName, undefined, ...<omitted>... ) could not be cloned. at writeChannelMessage (internal/child_process/serialization.js:79:9) at process.target._send (internal/child_process.js:805:17) at process.target.send (internal/child_process.js:703:19) at reportSuccess (/Users/simen/repos/jest/packages/jest-worker/build/workers/processChild.js:67:11) ``` While using worker (which uses structured clone by default) ``` (node:24148) UnhandledPromiseRejectionWarning: DataCloneError: () => (0, _jestMatcherUtils.matcherHint)( matcherName, undefined, ...<omitted>... ) could not be cloned. 
at reportSuccess (/Users/simen/repos/jest/packages/jest-worker/build/workers/threadChild.js:77:32) ``` @SimenB that's rather interesting because the MDN documentation explicitly mentions circular references... > The structured clone algorithm copies complex JavaScript objects. It is used internally to transfer data between Workers via postMessage(), storing objects with IndexedDB, or copying objects for other APIs. It clones by recursing through the input object while maintaining a map of previously visited references, **to avoid infinitely traversing cycles**. [source](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Structured_clone_algorithm). What kind of object did you pass? Any chance it did use Proxies, contained Symbols, or used other features that make it not a plain JS object? [spec](https://html.spec.whatwg.org/multipage/structured-data.html#structuredserializeinternal) However, it is as it is. I still believe that it would make sense to change jest worker to allow passing in a custom serializer / deserialiser instead of requiring a custom serialization logic for all clients. Or manually calling `fclone` before invoking the method on the `jest-worker` / reading the value in the worker. E.g. I personally would prefer to get an error than being surprised by the fact that some properties in the worker/ in the result have been changed to `[Circular]` for no obvious reasons.
"2020-12-28T13:48:21Z"
27.0
[]
[ "e2e/__tests__/circularInequality.test.ts" ]
TypeScript
[]
[ "https://repl.it/@Frantiekiaik/jest-playground-1." ]
jestjs/jest
11,320
jestjs__jest-11320
[ "10741" ]
1ba867bebc91dd2ae2d0666a521c3892f8bc27ec
diff --git a/packages/jest-reporters/src/GitHubActionsReporter.ts b/packages/jest-reporters/src/GitHubActionsReporter.ts new file mode 100644 --- /dev/null +++ b/packages/jest-reporters/src/GitHubActionsReporter.ts @@ -0,0 +1,54 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +import stripAnsi = require('strip-ansi'); +import type {AggregatedResult, TestResult} from '@jest/test-result'; +import BaseReporter from './BaseReporter'; +import type {Context} from './types'; + +const lineAndColumnInStackTrace = /^.*?:([0-9]+):([0-9]+).*$/; + +function replaceEntities(s: string): string { + // https://github.com/actions/toolkit/blob/b4639928698a6bfe1c4bdae4b2bfdad1cb75016d/packages/core/src/command.ts#L80-L85 + const substitutions: Array<[RegExp, string]> = [ + [/%/g, '%25'], + [/\r/g, '%0D'], + [/\n/g, '%0A'], + ]; + return substitutions.reduce((acc, sub) => acc.replace(...sub), s); +} + +export default class GitHubActionsReporter extends BaseReporter { + onRunComplete( + _contexts?: Set<Context>, + aggregatedResults?: AggregatedResult, + ): void { + const messages = getMessages(aggregatedResults?.testResults); + + for (const message of messages) { + this.log(message); + } + } +} + +function getMessages(results: Array<TestResult> | undefined) { + if (!results) return []; + + return results.flatMap(({testFilePath, testResults}) => + testResults + .filter(r => r.status === 'failed') + .flatMap(r => r.failureMessages) + .map(m => stripAnsi(m)) + .map(m => replaceEntities(m)) + .map(m => lineAndColumnInStackTrace.exec(m)) + .filter((m): m is RegExpExecArray => m !== null) + .map( + ([message, line, col]) => + `::error file=${testFilePath},line=${line},col=${col}::${message}`, + ), + ); +} diff --git a/packages/jest-reporters/src/index.ts b/packages/jest-reporters/src/index.ts --- 
a/packages/jest-reporters/src/index.ts +++ b/packages/jest-reporters/src/index.ts @@ -26,6 +26,7 @@ export {default as DefaultReporter} from './DefaultReporter'; export {default as NotifyReporter} from './NotifyReporter'; export {default as SummaryReporter} from './SummaryReporter'; export {default as VerboseReporter} from './VerboseReporter'; +export {default as GitHubActionsReporter} from './GitHubActionsReporter'; export type { Context, Reporter,
diff --git a/packages/jest-reporters/src/__tests__/GitHubActionsReporter.test.js b/packages/jest-reporters/src/__tests__/GitHubActionsReporter.test.js new file mode 100644 --- /dev/null +++ b/packages/jest-reporters/src/__tests__/GitHubActionsReporter.test.js @@ -0,0 +1,118 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ +'use strict'; + +let GitHubActionsReporter; + +const write = process.stderr.write; +const globalConfig = { + rootDir: 'root', + watch: false, +}; + +let results = []; + +function requireReporter() { + jest.isolateModules(() => { + GitHubActionsReporter = require('../GitHubActionsReporter').default; + }); +} + +beforeEach(() => { + process.stderr.write = result => results.push(result); +}); + +afterEach(() => { + results = []; + process.stderr.write = write; +}); + +const aggregatedResults = { + numFailedTestSuites: 1, + numFailedTests: 1, + numPassedTestSuites: 0, + numTotalTestSuites: 1, + numTotalTests: 1, + snapshot: { + added: 0, + didUpdate: false, + failure: false, + filesAdded: 0, + filesRemoved: 0, + filesRemovedList: [], + filesUnmatched: 0, + filesUpdated: 0, + matched: 0, + total: 0, + unchecked: 0, + uncheckedKeysByFile: [], + unmatched: 0, + updated: 0, + }, + startTime: 0, + success: false, + testResults: [ + { + numFailingTests: 1, + numPassingTests: 0, + numPendingTests: 0, + numTodoTests: 0, + openHandles: [], + perfStats: { + end: 1234, + runtime: 1234, + slow: false, + start: 0, + }, + skipped: false, + snapshot: { + added: 0, + fileDeleted: false, + matched: 0, + unchecked: 0, + uncheckedKeys: [], + unmatched: 0, + updated: 0, + }, + testFilePath: '/home/runner/work/jest/jest/some.test.js', + testResults: [ + { + ancestorTitles: [Array], + duration: 7, + failureDetails: [Array], + failureMessages: [ + ` + Error: 
\u001b[2mexpect(\u001b[22m\u001b[31mreceived\u001b[39m\u001b[2m).\u001b[22mtoBe\u001b[2m(\u001b[22m\u001b[32mexpected\u001b[39m\u001b[2m) // Object.is equality\u001b[22m\n + \n + Expected: \u001b[32m\"b\"\u001b[39m\n + Received: \u001b[31m\"a\"\u001b[39m\n + at Object.<anonymous> (/home/runner/work/jest/jest/some.test.js:4:17)\n + at Object.asyncJestTest (/home/runner/work/jest/jest/node_modules/jest-jasmine2/build/jasmineAsyncInstall.js:106:37)\n + at /home/runner/work/jest/jest/node_modules/jest-jasmine2/build/queueRunner.js:45:12\n + at new Promise (<anonymous>)\n + at mapper (/home/runner/work/jest/jest/node_modules/jest-jasmine2/build/queueRunner.js:28:19)\n + at /home/runner/work/jest/jest/node_modules/jest-jasmine2/build/queueRunner.js:75:41\n + at processTicksAndRejections (internal/process/task_queues.js:93:5) + `, + ], + fullName: 'asserts that a === b', + location: null, + numPassingAsserts: 0, + status: 'failed', + title: 'asserts that a === b', + }, + ], + }, + ], +}; + +test('reporter extracts the correct filename, line, and column', () => { + requireReporter(); + const testReporter = new GitHubActionsReporter(globalConfig); + testReporter.onRunComplete(new Set(), aggregatedResults); + expect(results.join('').replace(/\\/g, '/')).toMatchSnapshot(); +}); diff --git a/packages/jest-reporters/src/__tests__/__snapshots__/GitHubActionsReporter.test.js.snap b/packages/jest-reporters/src/__tests__/__snapshots__/GitHubActionsReporter.test.js.snap new file mode 100644 --- /dev/null +++ b/packages/jest-reporters/src/__tests__/__snapshots__/GitHubActionsReporter.test.js.snap @@ -0,0 +1,6 @@ +// Jest Snapshot v1, https://goo.gl/fbAQLP + +exports[`reporter extracts the correct filename, line, and column 1`] = ` +"::error file=/home/runner/work/jest/jest/some.test.js,line=4,col=17::%0A Error: expect(received).toBe(expected) // Object.is equality%0A%0A %0A%0A Expected: "b"%0A%0A Received: "a"%0A%0A at Object.<anonymous> 
(/home/runner/work/jest/jest/some.test.js:4:17)%0A%0A at Object.asyncJestTest (/home/runner/work/jest/jest/node_modules/jest-jasmine2/build/jasmineAsyncInstall.js:106:37)%0A%0A at /home/runner/work/jest/jest/node_modules/jest-jasmine2/build/queueRunner.js:45:12%0A%0A at new Promise (<anonymous>)%0A%0A at mapper (/home/runner/work/jest/jest/node_modules/jest-jasmine2/build/queueRunner.js:28:19)%0A%0A at /home/runner/work/jest/jest/node_modules/jest-jasmine2/build/queueRunner.js:75:41%0A%0A at processTicksAndRejections (internal/process/task_queues.js:93:5)%0A +" +`;
feat: Added GithubActionsReporter ## Summary This is based on recommendation and work done in https://github.com/stefanbuck/jest-matcher/pull/2 ## Test plan Testing is not implemented yet, this is just a placeholder pr
Hi @mariusGundersen! Thank you for your pull request and welcome to our community. We require contributors to sign our Contributor License Agreement, and we don't seem to have you on file. In order for us to review and merge your code, please sign at <https://code.facebook.com/cla>. **If you are contributing on behalf of someone else (eg your employer)**, the individual CLA may not be sufficient and your employer may need to sign the corporate CLA. If you have received this in error or have any questions, please contact us at [[email protected]](mailto:[email protected]?subject=CLA%20for%20facebook%2Fjest%20%2310741). Thanks! πŸ‘‹ Any news on this? This would be a great addition to Jest πŸŽ‰ @mariusGundersen, are you still working on this? Seems like there are a couple of style issues that have been flagged by the corresponding GH Action. It'd be great if these could be fixed and the PR could be set to 'Ready for review' 😊 πŸ™ I think it should be enough to run eslint to fix those. I'm mainly waiting for tests πŸ™‚
"2021-04-20T21:30:29Z"
28.0
[]
[ "packages/jest-reporters/src/__tests__/GitHubActionsReporter.test.js" ]
TypeScript
[]
[]
jestjs/jest
11,358
jestjs__jest-11358
[ "11296" ]
4c3643c60645a3c688920a41fa3cd21b981a3fd9
diff --git a/packages/jest-watcher/src/constants.ts b/packages/jest-watcher/src/constants.ts --- a/packages/jest-watcher/src/constants.ts +++ b/packages/jest-watcher/src/constants.ts @@ -15,6 +15,7 @@ export const KEYS = { BACKSPACE: Buffer.from(isWindows ? '08' : '7f', 'hex').toString(), CONTROL_C: '\u0003', CONTROL_D: '\u0004', + CONTROL_U: '\u0015', ENTER: '\r', ESCAPE: '\u001b', }; diff --git a/packages/jest-watcher/src/lib/Prompt.ts b/packages/jest-watcher/src/lib/Prompt.ts --- a/packages/jest-watcher/src/lib/Prompt.ts +++ b/packages/jest-watcher/src/lib/Prompt.ts @@ -89,6 +89,12 @@ export default class Prompt { case KEYS.ARROW_LEFT: case KEYS.ARROW_RIGHT: break; + case KEYS.CONTROL_U: + this._value = ''; + this._offset = -1; + this._selection = null; + this._onChange(); + break; default: this._value = key === KEYS.BACKSPACE ? this._value.slice(0, -1) : this._value + key;
diff --git a/packages/jest-watcher/src/lib/__tests__/prompt.test.ts b/packages/jest-watcher/src/lib/__tests__/prompt.test.ts --- a/packages/jest-watcher/src/lib/__tests__/prompt.test.ts +++ b/packages/jest-watcher/src/lib/__tests__/prompt.test.ts @@ -62,3 +62,20 @@ it('calls handler on cancel prompt', () => { expect(onCancel).toHaveBeenCalled(); }); + +it('clears the line when CONTROL_U is pressed', () => { + const prompt = new Prompt(); + const onChange = jest.fn(); + const options = {max: 10, offset: -1}; + + prompt.enter(onChange, jest.fn(), jest.fn()); + + prompt.put('t'); + prompt.put('e'); + prompt.put('s'); + prompt.put('t'); + expect(onChange).toHaveBeenLastCalledWith('test', options); + + prompt.put(KEYS.CONTROL_U); + expect(onChange).toHaveBeenLastCalledWith('', options); +});
<C-u> to clear watch mode pattern prompt <!-- Love Jest? Please consider supporting our collective: πŸ‘‰ https://opencollective.com/jest/donate --> ## πŸš€ Feature Proposal Currently, pressing Ctrl+u in a watch mode pattern prompt inserts an invisible character that also somewhat breaks searching because the files/tests searched for do not contain the character. In most terminal environments, Ctrl+u clears the whole line. I think this should happen in the watch mode pattern prompt as well. ## Motivation Many users will expect Ctrl+u to behave as elsewhere in their terminal environment. The workaround requires pressing backspace a lot to clear the line manually. ## Pitch Why does this feature belong in the [Jest core platform](https://www.youtube.com/watch?v=NtjyeojAOBs)? Since `jest-watcher`s `Prompt` is reused in third-party packages like `jest-watch-typeahead`, making the improvement here would benefit those as well as users of plain Jest watch mode prompts.
This is a Bash thing, but it probably works in most shell emulators (it does in zsh which I use at least). If we add ctrl+u, we should also do others, like meta/ctrl+b and meta/ctrl+f etc. Feels like a somewhat slippery slope, but perhaps there's some library we can use which parses the input and returns a "command" of some sort that's easier to understand? Or even better would be some "cursor" library which, given a string and a position can parse terminal input and return a new string (if it modifies it) and cursor position. I've done zero research into whether such a thing already exists πŸ™‚ > This is a Bash thing, but it probably works in most shell emulators (it does in zsh which I use at least). Yeah, but it's also supported in many non-shell places in "the terminal", like in Vim insert mode, when editing a tmux command line, ..., hence why I phrased it more generally as "terminal environments". Not sure how much of a slippery slope it is, I don't think there are many common commands that would be meaningful in the pattern prompt, and especially none that are as ubiquitous across different applications as <C-u>. We should definitely have moving backwards and forwards (plus deleting words) if we start adding support for "commands". That might be it, though? > Yeah, but it's also supported in many non-shell places in "the terminal" Oh, and perhaps notably <C-u> is supported in the login prompt on Linux as well, before you even get a shell. > We should definitely have moving backwards and forwards (plus deleting words) if we start adding support for "commands". That might be it, though? Yeah it seems moving forward and backward is about as ubiquitous. Would be nice if a library gives us everything for free - I've edited the description to say this should be checked - but I wouldn't mind introducing one without the other if it's easier since it's better than nothing. 
Deleting words with <C-w> also seems useful and almost as common (not supported in login prompt, where I guess it wouldn't be very helpful anyway, but just about anywhere else). Hi @jeysal @SimenB could I work on this issue? I think I can implement the `CTRL + U` functionality. Here is what I have in mind: - add `CONTROL_U` in the `KEYS` map in `constants.ts` - in the `put` method in the `Prompt` class whenever `CONTROL_U` is pressed set `this._value = ""`, `this._offset = -1`, `this._selection = null` & call `this._onChange()` @parthsharma2 sounds about right yeah :)
"2021-04-29T15:42:08Z"
27.0
[]
[ "packages/jest-watcher/src/lib/__tests__/prompt.test.ts" ]
TypeScript
[]
[]
jestjs/jest
11,364
jestjs__jest-11364
[ "11363" ]
26cb29a42cc9b967f29d70d8c70169993965cf01
diff --git a/packages/jest-each/src/table/array.ts b/packages/jest-each/src/table/array.ts --- a/packages/jest-each/src/table/array.ts +++ b/packages/jest-each/src/table/array.ts @@ -11,10 +11,11 @@ import type {Global} from '@jest/types'; import {format as pretty} from 'pretty-format'; import type {EachTests} from '../bind'; -const SUPPORTED_PLACEHOLDERS = /%[sdifjoOp%]/g; +const SUPPORTED_PLACEHOLDERS = /%[sdifjoOp]/g; const PRETTY_PLACEHOLDER = '%p'; const INDEX_PLACEHOLDER = '%#'; const PLACEHOLDER_PREFIX = '%'; +const ESCAPED_PLACEHOLDER_PREFIX = /%%/g; const JEST_EACH_PLACEHOLDER_ESCAPE = '@@__JEST_EACH_PLACEHOLDER_ESCAPE__@@'; export default (title: string, arrayTable: Global.ArrayTable): EachTests => @@ -46,17 +47,23 @@ const formatTitle = ( return interpolatePrettyPlaceholder(formattedTitle, normalisedValue); return util.format(formattedTitle, normalisedValue); - }, interpolateTitleIndex(title, rowIndex)) + }, interpolateTitleIndex(interpolateEscapedPlaceholders(title), rowIndex)) .replace(new RegExp(JEST_EACH_PLACEHOLDER_ESCAPE, 'g'), PLACEHOLDER_PREFIX); const normalisePlaceholderValue = (value: unknown) => - typeof value === 'string' && SUPPORTED_PLACEHOLDERS.test(value) - ? value.replace(PLACEHOLDER_PREFIX, JEST_EACH_PLACEHOLDER_ESCAPE) + typeof value === 'string' + ? value.replace( + new RegExp(PLACEHOLDER_PREFIX, 'g'), + JEST_EACH_PLACEHOLDER_ESCAPE, + ) : value; const getMatchingPlaceholders = (title: string) => title.match(SUPPORTED_PLACEHOLDERS) || []; +const interpolateEscapedPlaceholders = (title: string) => + title.replace(ESCAPED_PLACEHOLDER_PREFIX, JEST_EACH_PLACEHOLDER_ESCAPE); + const interpolateTitleIndex = (title: string, index: number) => title.replace(INDEX_PLACEHOLDER, index.toString());
diff --git a/packages/jest-each/src/__tests__/array.test.ts b/packages/jest-each/src/__tests__/array.test.ts --- a/packages/jest-each/src/__tests__/array.test.ts +++ b/packages/jest-each/src/__tests__/array.test.ts @@ -143,19 +143,22 @@ describe('jest-each', () => { ], ]); const testFunction = get(eachObject, keyPath); - testFunction('expected string: %s %d %s %s %d %j %s %j %d %d %#', noop); + testFunction( + 'expected string: %% %%s %s %d %s %s %d %j %s %j %d %d %#', + noop, + ); const globalMock = get(globalTestMocks, keyPath); expect(globalMock).toHaveBeenCalledTimes(2); expect(globalMock).toHaveBeenCalledWith( - `expected string: hello 1 null undefined 1.2 ${JSON.stringify({ + `expected string: % %s hello 1 null undefined 1.2 ${JSON.stringify({ foo: 'bar', })} () => {} [] Infinity NaN 0`, expectFunction, undefined, ); expect(globalMock).toHaveBeenCalledWith( - `expected string: world 1 null undefined 1.2 ${JSON.stringify({ + `expected string: % %s world 1 null undefined 1.2 ${JSON.stringify({ baz: 'qux', })} () => {} [] Infinity NaN 1`, expectFunction, @@ -400,12 +403,13 @@ describe('jest-each', () => { const eachObject = each.withGlobal(globalTestMocks)([ ['hello', '%d', 10, '%s', {foo: 'bar'}], ['world', '%i', 1991, '%p', {foo: 'bar'}], + ['joe', '%d %d', 10, '%%s', {foo: 'bar'}], ]); const testFunction = get(eachObject, keyPath); testFunction('expected string: %s %s %d %s %p', () => {}); const globalMock = get(globalTestMocks, keyPath); - expect(globalMock).toHaveBeenCalledTimes(2); + expect(globalMock).toHaveBeenCalledTimes(3); expect(globalMock).toHaveBeenCalledWith( 'expected string: hello %d 10 %s {"foo": "bar"}', expectFunction, @@ -416,6 +420,11 @@ describe('jest-each', () => { expectFunction, undefined, ); + expect(globalMock).toHaveBeenCalledWith( + 'expected string: joe %d %d 10 %%s {"foo": "bar"}', + expectFunction, + undefined, + ); }); }); });
Wrong interpolation in `test.each` when the value of array contains multiple `%` ## πŸ› Bug Report When the value of array contains multiple `%`, wrong interpolation may occur. ## To Reproduce index.text.js ```javascript describe("test name should to be `%d %d`", () => { test.each([["%d %d", 1, 2, 3]])("%s", (_, a, b, expected) => { expect(a + b).toBe(expected); }); }); describe("test name should to be `%%d`", () => { test.each([["%%d", 1, 2, 3]])("%s", (_, a, b, expected) => { expect(a + b).toBe(expected); }); }); ``` Run `npx jest`, then you can see the following: ```console $ npx jest PASS ./index.test.js test name should to be `%d %d` βœ“ %d 1 (2 ms) test name should to be `%%d` βœ“ %1 Test Suites: 1 passed, 1 total Tests: 2 passed, 2 total Snapshots: 0 total Time: 0.324 s, estimated 1 s Ran all test suites. ``` ## Expected behavior ```console $ npx jest PASS ./index.test.js test name should to be `%d %d` βœ“ %d %d (2 ms) test name should to be `%%d` βœ“ %%d Test Suites: 1 passed, 1 total Tests: 2 passed, 2 total Snapshots: 0 total Time: 0.324 s, estimated 1 s Ran all test suites. ``` ## Link to repl or repo (highly encouraged) ## envinfo <!-- Run npx envinfo --preset jest Paste the results here: --> ``` System: OS: Linux 4.19 Debian GNU/Linux 10 (buster) 10 (buster) CPU: (8) x64 Intel(R) Core(TM) i7-9700K CPU @ 3.60GHz Binaries: Node: 14.16.1 - ~/.nvm/versions/node/v14.16.1/bin/node Yarn: 1.22.10 - ~/.nvm/versions/node/v14.16.1/bin/yarn npm: 7.11.1 - ~/.nvm/versions/node/v14.16.1/bin/npm npmPackages: jest: ^26.6.3 => 26.6.3 ```
"2021-05-01T05:10:57Z"
27.0
[]
[ "packages/jest-each/src/__tests__/array.test.ts" ]
TypeScript
[]
[]
jestjs/jest
11,388
jestjs__jest-11388
[ "10394" ]
705af6854e2f4b32775d65bb06a30c596fad5612
diff --git a/packages/jest-each/src/table/array.ts b/packages/jest-each/src/table/array.ts --- a/packages/jest-each/src/table/array.ts +++ b/packages/jest-each/src/table/array.ts @@ -10,6 +10,8 @@ import * as util from 'util'; import type {Global} from '@jest/types'; import {format as pretty} from 'pretty-format'; import type {EachTests} from '../bind'; +import type {Templates} from './interpolation'; +import {interpolateVariables} from './interpolation'; const SUPPORTED_PLACEHOLDERS = /%[sdifjoOp]/g; const PRETTY_PLACEHOLDER = '%p'; @@ -18,11 +20,29 @@ const PLACEHOLDER_PREFIX = '%'; const ESCAPED_PLACEHOLDER_PREFIX = /%%/g; const JEST_EACH_PLACEHOLDER_ESCAPE = '@@__JEST_EACH_PLACEHOLDER_ESCAPE__@@'; -export default (title: string, arrayTable: Global.ArrayTable): EachTests => - normaliseTable(arrayTable).map((row, index) => ({ +export default (title: string, arrayTable: Global.ArrayTable): EachTests => { + if (isTemplates(title, arrayTable)) { + return arrayTable.map((template, index) => ({ + arguments: [template], + title: interpolateVariables(title, template, index).replace( + ESCAPED_PLACEHOLDER_PREFIX, + PLACEHOLDER_PREFIX, + ), + })); + } + return normaliseTable(arrayTable).map((row, index) => ({ arguments: row, title: formatTitle(title, row, index), })); +}; + +const isTemplates = ( + title: string, + arrayTable: Global.ArrayTable, +): arrayTable is Templates => + !SUPPORTED_PLACEHOLDERS.test(interpolateEscapedPlaceholders(title)) && + !isTable(arrayTable) && + arrayTable.every(col => col != null && typeof col === 'object'); const normaliseTable = (table: Global.ArrayTable): Global.Table => isTable(table) ? table : table.map(colToRow); diff --git a/packages/jest-each/src/table/interpolation.ts b/packages/jest-each/src/table/interpolation.ts new file mode 100644 --- /dev/null +++ b/packages/jest-each/src/table/interpolation.ts @@ -0,0 +1,86 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+ * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + * + */ + +import {isPrimitive} from 'jest-get-type'; +import {format as pretty} from 'pretty-format'; + +export type Template = Record<string, unknown>; +export type Templates = Array<Template>; +export type Headings = Array<string>; + +export const interpolateVariables = ( + title: string, + template: Template, + index: number, +): string => + Object.keys(template) + .reduce(getMatchingKeyPaths(title), []) // aka flatMap + .reduce(replaceKeyPathWithValue(template), title) + .replace('$#', '' + index); + +const getMatchingKeyPaths = (title: string) => ( + matches: Headings, + key: string, +) => matches.concat(title.match(new RegExp(`\\$${key}[\\.\\w]*`, 'g')) || []); + +const replaceKeyPathWithValue = (template: Template) => ( + title: string, + match: string, +) => { + const keyPath = match.replace('$', '').split('.'); + const value = getPath(template, keyPath); + + if (isPrimitive(value)) { + return title.replace(match, String(value)); + } + return title.replace(match, pretty(value, {maxDepth: 1, min: true})); +}; + +/* eslint import/export: 0*/ +export function getPath< + Obj extends Template, + A extends keyof Obj, + B extends keyof Obj[A], + C extends keyof Obj[A][B], + D extends keyof Obj[A][B][C], + E extends keyof Obj[A][B][C][D] +>(obj: Obj, path: [A, B, C, D, E]): Obj[A][B][C][D][E]; +export function getPath< + Obj extends Template, + A extends keyof Obj, + B extends keyof Obj[A], + C extends keyof Obj[A][B], + D extends keyof Obj[A][B][C] +>(obj: Obj, path: [A, B, C, D]): Obj[A][B][C][D]; +export function getPath< + Obj extends Template, + A extends keyof Obj, + B extends keyof Obj[A], + C extends keyof Obj[A][B] +>(obj: Obj, path: [A, B, C]): Obj[A][B][C]; +export function getPath< + Obj extends Template, + A extends keyof Obj, + B extends keyof Obj[A] +>(obj: Obj, path: [A, B]): Obj[A][B]; +export function getPath<Obj 
extends Template, A extends keyof Obj>( + obj: Obj, + path: [A], +): Obj[A]; +export function getPath<Obj extends Template>( + obj: Obj, + path: Array<string>, +): unknown; +export function getPath( + template: Template, + [head, ...tail]: Array<string>, +): unknown { + if (!head || !template.hasOwnProperty || !template.hasOwnProperty(head)) + return template; + return getPath(template[head] as Template, tail); +} diff --git a/packages/jest-each/src/table/template.ts b/packages/jest-each/src/table/template.ts --- a/packages/jest-each/src/table/template.ts +++ b/packages/jest-each/src/table/template.ts @@ -7,13 +7,9 @@ */ import type {Global} from '@jest/types'; -import {isPrimitive} from 'jest-get-type'; -import {format as pretty} from 'pretty-format'; import type {EachTests} from '../bind'; - -type Template = Record<string, unknown>; -type Templates = Array<Template>; -type Headings = Array<string>; +import type {Headings, Template, Templates} from './interpolation'; +import {interpolateVariables} from './interpolation'; export default ( title: string, @@ -24,7 +20,7 @@ export default ( const templates = convertTableToTemplates(table, headings); return templates.map((template, index) => ({ arguments: [template], - title: interpolate(title, template, index), + title: interpolateVariables(title, template, index), })); }; @@ -46,71 +42,3 @@ const convertTableToTemplates = ( {}, ), ); - -const interpolate = (title: string, template: Template, index: number) => - Object.keys(template) - .reduce(getMatchingKeyPaths(title), []) // aka flatMap - .reduce(replaceKeyPathWithValue(template), title) - .replace('$#', '' + index); - -const getMatchingKeyPaths = (title: string) => ( - matches: Headings, - key: string, -) => matches.concat(title.match(new RegExp(`\\$${key}[\\.\\w]*`, 'g')) || []); - -const replaceKeyPathWithValue = (template: Template) => ( - title: string, - match: string, -) => { - const keyPath = match.replace('$', '').split('.'); - const value = 
getPath(template, keyPath); - - if (isPrimitive(value)) { - return title.replace(match, String(value)); - } - return title.replace(match, pretty(value, {maxDepth: 1, min: true})); -}; - -/* eslint import/export: 0*/ -export function getPath< - Obj extends Template, - A extends keyof Obj, - B extends keyof Obj[A], - C extends keyof Obj[A][B], - D extends keyof Obj[A][B][C], - E extends keyof Obj[A][B][C][D] ->(obj: Obj, path: [A, B, C, D, E]): Obj[A][B][C][D][E]; -export function getPath< - Obj extends Template, - A extends keyof Obj, - B extends keyof Obj[A], - C extends keyof Obj[A][B], - D extends keyof Obj[A][B][C] ->(obj: Obj, path: [A, B, C, D]): Obj[A][B][C][D]; -export function getPath< - Obj extends Template, - A extends keyof Obj, - B extends keyof Obj[A], - C extends keyof Obj[A][B] ->(obj: Obj, path: [A, B, C]): Obj[A][B][C]; -export function getPath< - Obj extends Template, - A extends keyof Obj, - B extends keyof Obj[A] ->(obj: Obj, path: [A, B]): Obj[A][B]; -export function getPath<Obj extends Template, A extends keyof Obj>( - obj: Obj, - path: [A], -): Obj[A]; -export function getPath<Obj extends Template>( - obj: Obj, - path: Array<string>, -): unknown; -export function getPath( - template: Template, - [head, ...tail]: Array<string>, -): unknown { - if (!head || !template.hasOwnProperty || !template.hasOwnProperty(head)) - return template; - return getPath(template[head] as Template, tail); -}
diff --git a/packages/jest-each/src/__tests__/array.test.ts b/packages/jest-each/src/__tests__/array.test.ts --- a/packages/jest-each/src/__tests__/array.test.ts +++ b/packages/jest-each/src/__tests__/array.test.ts @@ -291,6 +291,83 @@ describe('jest-each', () => { 10000, ); }); + + test('calls global with title containing object property when using $variable', () => { + const globalTestMocks = getGlobalTestMocks(); + const eachObject = each.withGlobal(globalTestMocks)([ + { + a: 'hello', + b: 1, + c: null, + d: undefined, + e: 1.2, + f: {key: 'foo'}, + g: () => {}, + h: [], + i: Infinity, + j: NaN, + }, + { + a: 'world', + b: 1, + c: null, + d: undefined, + e: 1.2, + f: {key: 'bar'}, + g: () => {}, + h: [], + i: Infinity, + j: NaN, + }, + ]); + const testFunction = get(eachObject, keyPath); + testFunction( + 'expected string: %% %%s $a $b $c $d $e $f $f.key $g $h $i $j $#', + noop, + ); + + const globalMock = get(globalTestMocks, keyPath); + expect(globalMock).toHaveBeenCalledTimes(2); + expect(globalMock).toHaveBeenCalledWith( + 'expected string: % %s hello 1 null undefined 1.2 {"key": "foo"} foo [Function g] [] Infinity NaN 0', + expectFunction, + undefined, + ); + expect(globalMock).toHaveBeenCalledWith( + 'expected string: % %s world 1 null undefined 1.2 {"key": "bar"} bar [Function g] [] Infinity NaN 1', + expectFunction, + undefined, + ); + }); + + test('calls global with title containing param values when using both % placeholder and $variable', () => { + const globalTestMocks = getGlobalTestMocks(); + const eachObject = each.withGlobal(globalTestMocks)([ + { + a: 'hello', + b: 1, + }, + { + a: 'world', + b: 1, + }, + ]); + const testFunction = get(eachObject, keyPath); + testFunction('expected string: %p %# $a $b $#', noop); + + const globalMock = get(globalTestMocks, keyPath); + expect(globalMock).toHaveBeenCalledTimes(2); + expect(globalMock).toHaveBeenCalledWith( + 'expected string: {"a": "hello", "b": 1} 0 $a $b $#', + expectFunction, + undefined, + ); 
+ expect(globalMock).toHaveBeenCalledWith( + 'expected string: {"a": "world", "b": 1} 1 $a $b $#', + expectFunction, + undefined, + ); + }); }); });
`test.each` should perform string interpolation with object properties <!-- Love Jest? Please consider supporting our collective: πŸ‘‰ https://opencollective.com/jest/donate --> ## πŸš€ Feature Proposal When giving an array of objects to the `test.each` overload that accepts an array, and not a template strings table, jest should interpolate the object's properties into the test name. ## Motivation The current interpolation of template strings works great, but it doesn't provide any hook for type-safety. This is problematic when scaling up to large and complex test cases, and in TypeScript, it allows `any` to sneak into your test functions. It also is more intuitive, it left me wondering for a while why a transformation to a type-safe version of a test wasn't causing interpolation of the test case. ## Example We can do something like this right now: ```typescript test.each` a | b | expectedResult ${1} | ${1} | ${2} ${2} | ${2} | ${4} `('add($a, $b) === $expectedResult', ({ a, b, expectedResult }) => { expect(a + b).toEqual(expectedResult); }); ``` Which will output the test cases: √ add(1, 1) === 2 √ add(2, 2) === 4 In order to achieve type-safety on the same test, I would make this transformation: ```typescript type AddExample = { a: number; b: number; expectedResult: number; }; test.each<AddExample>([ { a: 1, b: 1, expectedResult: 2 }, { a: 2, b: 2, expectedResult: 4 }, ])('add($a, $b) === $expectedResult', ({ a, b, expectedResult }) => { expect(a + b).toEqual(expectedResult); }); ``` Which I would expect should output the same test cases as above, but instead: √ add($a, $b) === $expectedResult √ add($a, $b) === $expectedResult ## Pitch It is isomorphic to functionality in the core library, but I believe it cannot be implemented as an extension.
@mattphillips thoughts? Seems reasonable to me This has been something I've wanted to add for a while but not got around to it and I think we should add parity with the tagged template table with `$` syntax for sure πŸ‘ Hey @mattphillips, I was looking at the code to see how I could contribute this change. Was wondering what your thoughts were on ensuring it's not breaking. Perhaps if the test name contains any of the printf special characters, use the printf formatting logic, else use variable interpolation? Found this in my inbox and noticed it hasn't been replied to, so I'll just go ahead and answer from my perspective :sweat_smile: @nth-commit I would say as long as we land it in a major version (27.0.0 is likely coming up soon), it'd be fine to have the small breaking change. It's particularly small since only existing `$variable`s will be interpolated, `$b` if there is no `b` variable in the template string is already being ignored, so most test case names even if they have `$` in their name will not be affected.
"2021-05-08T05:22:58Z"
27.0
[]
[ "packages/jest-each/src/__tests__/array.test.ts" ]
TypeScript
[]
[]
jestjs/jest
11,548
jestjs__jest-11548
[ "9728" ]
d1882f2e6033186bd310240add41ffe50c2a9259
diff --git a/packages/jest-core/src/SearchSource.ts b/packages/jest-core/src/SearchSource.ts --- a/packages/jest-core/src/SearchSource.ts +++ b/packages/jest-core/src/SearchSource.ts @@ -288,18 +288,7 @@ export default class SearchSource { let paths = globalConfig.nonFlagArgs; if (globalConfig.findRelatedTests && 'win32' === os.platform()) { - const allFiles = this._context.hasteFS.getAllFiles(); - const options = {nocase: true, windows: false}; - - paths = paths - .map(p => { - const relativePath = path - .resolve(this._context.config.cwd, p) - .replace(/\\/g, '\\\\'); - const match = micromatch(allFiles, relativePath, options); - return match[0]; - }) - .filter(Boolean); + paths = this.filterPathsWin32(paths); } if (globalConfig.runTestsByPath && paths && paths.length) { @@ -316,6 +305,32 @@ export default class SearchSource { } } + public filterPathsWin32(paths: Array<string>): Array<string> { + const allFiles = this._context.hasteFS.getAllFiles(); + const options = {nocase: true, windows: false}; + + function normalizePosix(filePath: string) { + return filePath.replace(/\\/g, '/'); + } + + paths = paths + .map(p => { + // micromatch works with forward slashes: https://github.com/micromatch/micromatch#backslashes + const normalizedPath = normalizePosix( + path.resolve(this._context.config.cwd, p), + ); + const match = micromatch( + allFiles.map(normalizePosix), + normalizedPath, + options, + ); + return match[0]; + }) + .filter(Boolean) + .map(p => path.resolve(p)); + return paths; + } + async getTestPaths( globalConfig: Config.GlobalConfig, changedFiles: ChangedFiles | undefined,
diff --git a/packages/jest-core/src/__tests__/SearchSource.test.ts b/packages/jest-core/src/__tests__/SearchSource.test.ts --- a/packages/jest-core/src/__tests__/SearchSource.test.ts +++ b/packages/jest-core/src/__tests__/SearchSource.test.ts @@ -406,6 +406,81 @@ describe('SearchSource', () => { }); }); + describe('filterPathsWin32', () => { + beforeEach(async () => { + const config = ( + await normalize( + { + name, + rootDir: '.', + roots: [], + }, + {} as Config.Argv, + ) + ).options; + const context = await Runtime.createContext(config, { + maxWorkers, + watchman: false, + }); + + searchSource = new SearchSource(context); + context.hasteFS.getAllFiles = () => [ + path.resolve('packages/lib/my-lib.ts'), + path.resolve('packages/@core/my-app.ts'), + path.resolve('packages/+cli/my-cli.ts'), + path.resolve('packages/.hidden/my-app-hidden.ts'), + path.resolve('packages/programs (x86)/my-program.ts'), + ]; + }); + + it('should allow a simple match', async () => { + const result = searchSource.filterPathsWin32(['packages/lib/my-lib.ts']); + expect(result).toEqual([path.resolve('packages/lib/my-lib.ts')]); + }); + it('should allow to match a file inside a hidden directory', async () => { + const result = searchSource.filterPathsWin32([ + 'packages/.hidden/my-app-hidden.ts', + ]); + expect(result).toEqual([ + path.resolve('packages/.hidden/my-app-hidden.ts'), + ]); + }); + it('should allow to match a file inside a directory prefixed with a "@"', async () => { + const result = searchSource.filterPathsWin32([ + 'packages/@core/my-app.ts', + ]); + expect(result).toEqual([path.resolve('packages/@core/my-app.ts')]); + }); + it('should allow to match a file inside a directory prefixed with a "+"', async () => { + const result = searchSource.filterPathsWin32(['packages/+cli/my-cli.ts']); + expect(result).toEqual([path.resolve('packages/+cli/my-cli.ts')]); + }); + it('should allow an @(pattern)', () => { + const result = searchSource.filterPathsWin32([ + 
'packages/@(@core)/my-app.ts', + ]); + expect(result).toEqual([path.resolve('packages/@core/my-app.ts')]); + }); + it('should allow a +(pattern)', () => { + const result = searchSource.filterPathsWin32([ + 'packages/+(@core)/my-app.ts', + ]); + expect(result).toEqual([path.resolve('packages/@core/my-app.ts')]); + }); + it('should allow for (pattern) in file path', () => { + const result = searchSource.filterPathsWin32([ + 'packages/programs (x86)/my-program.ts', + ]); + expect(result).toEqual([ + path.resolve('packages/programs (x86)/my-program.ts'), + ]); + }); + it('should allow no results found', () => { + const result = searchSource.filterPathsWin32(['not/exists']); + expect(result).toHaveLength(0); + }); + }); + describe('findRelatedTests', () => { const rootDir = path.join( __dirname,
Jest25: --findRelatedTests not working in hidden directory on windows ## πŸ› Bug Report Since v25, `--findRelatedTests` no longer works on Windows when running in a hidden directory (a directory starting with a `.`). ## To Reproduce A really small reproduction repo can be found here: https://github.com/nicojs/jest-find-related-test-bug On windows: ``` git clone [email protected]:nicojs/jest-find-related-test-bug.git cd jest-find-related-test-bug/.hidden npm i npx jest --findRelatedTests src/sum.js ``` ## Expected behavior ``` PASS src/sum.test.js √ adds 1 + 2 to equal 3 (2ms) √ sub 1 - 0 to equal 1 Test Suites: 1 passed, 1 total Tests: 2 passed, 2 total Snapshots: 0 total Time: 3.381s Ran all test suites related to files matching /src\\sum.js/i. ``` But was: ``` No tests found, exiting with code 1 Run with `--passWithNoTests` to exit with code 0 In C:\z\github\nicojs\jest-find-related-test-bug\.hidden 4 files checked. testMatch: **/__tests__/**/*.[jt]s?(x), **/?(*.)+(spec|test).[tj]s?(x) - 1 match testPathIgnorePatterns: \\node_modules\\ - 4 matches testRegex: - 0 matches Pattern: src\\sum.js - 0 matches ``` This behavior broke with jest@25. I.e. this works: ``` npm i jest@24 npx jest --findRelatedTests src/sum.js ``` Moving the code to a non-hidden directory also works: ``` cd .. mv .hidden hidden cd hidden npx jest --findRelatedTests src/sum.js ``` Or Linux also seems to work (tested with Windows Subsystem for Linux). ## Link to repl or repo (highly encouraged) https://github.com/nicojs/jest-find-related-test-bug ## envinfo ``` System: OS: Windows 10 10.0.18363 CPU: (8) x64 Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz Binaries: Node: 12.16.1 - C:\Program Files\nodejs\node.EXE Yarn: 1.12.3 - ~\AppData\Roaming\npm\yarn.CMD npm: 6.13.6 - C:\Program Files\nodejs\npm.CMD npmPackages: jest: ^25.2.3 => 25.2.3 ```
"2021-06-08T20:25:16Z"
27.0
[]
[ "packages/jest-core/src/__tests__/SearchSource.test.ts" ]
TypeScript
[]
[]
jestjs/jest
11,882
jestjs__jest-11882
[ "11873" ]
f4188de69812651ed1408eac1baeebed6414dd9c
diff --git a/packages/jest-runtime/src/index.ts b/packages/jest-runtime/src/index.ts --- a/packages/jest-runtime/src/index.ts +++ b/packages/jest-runtime/src/index.ts @@ -861,12 +861,14 @@ export default class Runtime { {conditions: this.cjsConditions}, ); - const mockRegistry = this._isolatedMockRegistry || this._mockRegistry; - - if (mockRegistry.get(moduleID)) { - return mockRegistry.get(moduleID); + if (this._isolatedMockRegistry?.has(moduleID)) { + return this._isolatedMockRegistry.get(moduleID); + } else if (this._mockRegistry.has(moduleID)) { + return this._mockRegistry.get(moduleID); } + const mockRegistry = this._isolatedMockRegistry || this._mockRegistry; + if (this._mockFactories.has(moduleID)) { // has check above makes this ok const module = this._mockFactories.get(moduleID)!();
diff --git a/e2e/__tests__/isolateModules.test.ts b/e2e/__tests__/isolateModules.test.ts new file mode 100644 --- /dev/null +++ b/e2e/__tests__/isolateModules.test.ts @@ -0,0 +1,55 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +import {tmpdir} from 'os'; +import * as path from 'path'; +import {cleanup, createEmptyPackage, writeFiles} from '../Utils'; +import runJest from '../runJest'; + +const DIR = path.resolve(tmpdir(), 'isolate-modules.test'); + +beforeEach(() => { + cleanup(DIR); + createEmptyPackage(DIR); +}); + +afterAll(() => cleanup(DIR)); + +test('works with mocks', () => { + writeFiles(DIR, { + 'config.js': ` + module.exports.getBoolean = function getBoolean(variableName) { + return false; + } + `, + 'read.js': ` + const {getBoolean} = require('./config'); + + const value = getBoolean('foo'); + console.log("was " + value); + `, + 'test.js': ` + jest.mock('./config'); + const config = require('./config'); + + test('dummy test', () => { + const configGetMock = config.getBoolean.mockImplementation(() => { + return true; + }); + + jest.isolateModules(() => { + require("./read"); + }); + + expect(configGetMock).toBeCalledTimes(1); + }) + `, + }); + const {exitCode} = runJest(DIR); + + expect(exitCode).toBe(0); +});
[Bug]: issue with jest-runtime between 27.1.0 and 27.1.1 and jest.isolateModules ### Version 27.2.0 ### Steps to reproduce unable to link my repo, but can give an example that currently works. seems how mocks are applied changed between 27.1.0 and 27.1.1. assume related to this PR: https://github.com/facebook/jest/pull/11818 ### Expected behavior ```javascript const someModule = require('some-module'); jest.mock('some-module') describe('test', () => { let app; beforeEach(() => { jest.isolateModules(() => { app = require('./app.js') }) }) test('expect module to have been called', () => { expect(someModule.function).toHaveBeenCalled(); }) }) ``` I would expect this to pass, which is currently does if I pass in a yarn resolution to downgrade `jest-runtime` to version `27.1.0`. ### Actual behavior Using above example, the instance of `some-module` inside of the test and inside the required file appear to be different. Inside the file, the module is indeed mocked, but the expectation does not pass. ### Additional context _No response_ ### Environment ```shell System: OS: macOS 11.5.2 CPU: (12) x64 Intel(R) Core(TM) i9-8950HK CPU @ 2.90GHz Binaries: Node: 14.17.6 - /usr/local/bin/node Yarn: 1.22.11 - /usr/local/bin/yarn npm: 7.21.1 - /usr/local/bin/npm npmPackages: jest: 27.2.0 => 27.2.0 ```
have the same problem with ts-jest, here is a repro repo https://github.com/rburgst/ts-jest-27.1 Please put together a minimal repository showing the error (i.e. without `ts-jest` - I have zero clue what `mocked` is). Ideally without typescript etc either Actually never mind, just ripping it out wasn't too bad - fix incoming πŸ™‚
"2021-09-16T12:15:56Z"
27.2
[]
[ "e2e/__tests__/isolateModules.test.ts" ]
TypeScript
[]
[]
jestjs/jest
11,902
jestjs__jest-11902
[ "11900" ]
8024306c365cdf8b17b2256c73f1f4c9f23f8f77
diff --git a/packages/jest-cli/src/cli/args.ts b/packages/jest-cli/src/cli/args.ts --- a/packages/jest-cli/src/cli/args.ts +++ b/packages/jest-cli/src/cli/args.ts @@ -549,9 +549,9 @@ export const options = { }, testEnvironmentOptions: { description: - 'Test environment options that will be passed to the testEnvironment. ' + + 'A JSON string with options that will be passed to the `testEnvironment`. ' + 'The relevant options depend on the environment.', - type: 'string', // Object + type: 'string', }, testFailureExitCode: { description: 'Exit code of `jest` command if the test run failed', diff --git a/packages/jest-config/src/setFromArgv.ts b/packages/jest-config/src/setFromArgv.ts --- a/packages/jest-config/src/setFromArgv.ts +++ b/packages/jest-config/src/setFromArgv.ts @@ -35,9 +35,10 @@ export default function setFromArgv( break; case 'coverageThreshold': case 'globals': + case 'haste': case 'moduleNameMapper': + case 'testEnvironmentOptions': case 'transform': - case 'haste': const str = argv[key]; if (isJSONString(str)) { options[key] = JSON.parse(str); diff --git a/packages/jest-types/src/Config.ts b/packages/jest-types/src/Config.ts --- a/packages/jest-types/src/Config.ts +++ b/packages/jest-types/src/Config.ts @@ -462,6 +462,7 @@ export type Argv = Arguments< silent: boolean; snapshotSerializers: Array<string>; testEnvironment: string; + testEnvironmentOptions: string; testFailureExitCode: string | null | undefined; testMatch: Array<string>; testNamePattern: string;
diff --git a/packages/jest-config/src/__tests__/setFromArgv.test.ts b/packages/jest-config/src/__tests__/setFromArgv.test.ts --- a/packages/jest-config/src/__tests__/setFromArgv.test.ts +++ b/packages/jest-config/src/__tests__/setFromArgv.test.ts @@ -47,6 +47,7 @@ test('works with string objects', () => { const argv = { moduleNameMapper: '{"types/(.*)": "<rootDir>/src/types/$1", "types2/(.*)": ["<rootDir>/src/types2/$1", "<rootDir>/src/types3/$1"]}', + testEnvironmentOptions: '{"userAgent": "Agent/007"}', transform: '{"*.js": "<rootDir>/transformer"}', } as Config.Argv; expect(setFromArgv(options, argv)).toMatchObject({ @@ -54,6 +55,9 @@ test('works with string objects', () => { 'types/(.*)': '<rootDir>/src/types/$1', 'types2/(.*)': ['<rootDir>/src/types2/$1', '<rootDir>/src/types3/$1'], }, + testEnvironmentOptions: { + userAgent: 'Agent/007', + }, transform: { '*.js': '<rootDir>/transformer', },
[Bug]: `testEnvironmentOptions` documentation/types are wrong ### Version 27.2.2 ### Steps to reproduce 1. define a custom environment, eg. ```ts class CustomEnvironment extends NodeEnvironment { constructor(config) { super(config) const options = config.testEnvironmentOptions //type is Record<string, unknown> console.log(typeof options) } } ``` (this step propbably isn't necessary to reproduce the issue but makes it easier to see the problem) 2. run `jest -h` ``` --testEnvironmentOptions Test environment options that will be passed to the testEnvironment. The relevant options depend on the environment. [string] ``` notice the type `string` 3. pass a value to `--testEnvironmentOptions`, assuming it gets `JSON.parse`d into a `Record`: ``` jest --testEnvironmentOptions="{ foo: 'bar' }" ``` ### Expected behavior if the type passed in the cli is `string`but the type in ts is `Record<string, unknown>`, i'd expect it to parse the JSON into a `Record` or something. therefore, the `console.log` from the custom environment's constructor outputs `"object"` ### Actual behavior the `console.log` from the custom environment's constructor outputs `"string"` ### Additional context `testEnvironmentOptions` isn't even present [here](https://jestjs.io/docs/cli). are you even supposed to be able to pass it in the command line? ### Environment ```shell System: OS: Windows 10 10.0.18363 CPU: (6) x64 Intel(R) Core(TM) i5-8600 CPU @ 3.10GHz Binaries: Node: 16.9.1 - C:\Program Files\nodejs\node.EXE npm: 7.24.1 - C:\Program Files\nodejs\npm.CMD npmPackages: jest: ^27.2.2 => 27.2.2 ```
"2021-09-28T08:12:50Z"
27.2
[]
[ "packages/jest-config/src/__tests__/setFromArgv.test.ts" ]
TypeScript
[]
[]
jestjs/jest
11,943
jestjs__jest-11943
[ "11927" ]
1547740bbc26400d69f4576bf35645163e942829
diff --git a/packages/jest-runtime/src/index.ts b/packages/jest-runtime/src/index.ts --- a/packages/jest-runtime/src/index.ts +++ b/packages/jest-runtime/src/index.ts @@ -1242,28 +1242,39 @@ export default class Runtime { ); } - const {paths} = options; - - if (paths) { - for (const p of paths) { - const absolutePath = path.resolve(from, '..', p); - const module = this._resolver.resolveModuleFromDirIfExists( - absolutePath, - moduleName, - // required to also resolve files without leading './' directly in the path - {conditions: this.cjsConditions, paths: [absolutePath]}, - ); - if (module) { - return module; - } + if (path.isAbsolute(moduleName)) { + const module = this._resolver.resolveModuleFromDirIfExists( + moduleName, + moduleName, + {conditions: this.cjsConditions, paths: []}, + ); + if (module) { + return module; } + } else { + const {paths} = options; + if (paths) { + for (const p of paths) { + const absolutePath = path.resolve(from, '..', p); + const module = this._resolver.resolveModuleFromDirIfExists( + absolutePath, + moduleName, + // required to also resolve files without leading './' directly in the path + {conditions: this.cjsConditions, paths: [absolutePath]}, + ); + if (module) { + return module; + } + } - throw new Resolver.ModuleNotFoundError( - `Cannot resolve module '${moduleName}' from paths ['${paths.join( - "', '", - )}'] from ${from}`, - ); + throw new Resolver.ModuleNotFoundError( + `Cannot resolve module '${moduleName}' from paths ['${paths.join( + "', '", + )}'] from ${from}`, + ); + } } + try { return this._resolveModule(from, moduleName, { conditions: this.cjsConditions,
diff --git a/packages/jest-runtime/src/__tests__/runtime_require_resolve.test.ts b/packages/jest-runtime/src/__tests__/runtime_require_resolve.test.ts --- a/packages/jest-runtime/src/__tests__/runtime_require_resolve.test.ts +++ b/packages/jest-runtime/src/__tests__/runtime_require_resolve.test.ts @@ -6,6 +6,9 @@ * */ +import os from 'os'; +import path from 'path'; +import {promises as fs} from 'graceful-fs'; import type {Config} from '@jest/types'; import type Runtime from '..'; import {createOutsideJestVmPath} from '../helpers'; @@ -15,6 +18,9 @@ let createRuntime: ( config?: Config.InitialOptions, ) => Promise<Runtime & {__mockRootPath: string}>; +const getTmpDir = async () => + await fs.mkdtemp(path.join(os.tmpdir(), 'jest-resolve-test-')); + describe('Runtime require.resolve', () => { beforeEach(() => { createRuntime = require('createRuntime'); @@ -29,6 +35,47 @@ describe('Runtime require.resolve', () => { expect(resolved).toEqual(require.resolve('./test_root/resolve_self.js')); }); + it('resolves an absolute module path', async () => { + const absoluteFilePath = path.join(await getTmpDir(), 'test.js'); + await fs.writeFile( + absoluteFilePath, + 'module.exports = require.resolve(__filename);', + 'utf-8', + ); + + const runtime = await createRuntime(__filename); + const resolved = runtime.requireModule( + runtime.__mockRootPath, + absoluteFilePath, + ); + + expect(resolved).toEqual(require.resolve(absoluteFilePath)); + }); + + it('required modules can resolve absolute module paths with no paths entries passed', async () => { + const tmpdir = await getTmpDir(); + const entrypoint = path.join(tmpdir, 'test.js'); + const target = path.join(tmpdir, 'target.js'); + + // we want to test the require.resolve implementation within a + // runtime-required module, so we need to create a module that then resolves + // an absolute path, so we need two files: the entrypoint, and an absolute + // target to require. 
+ await fs.writeFile( + entrypoint, + `module.exports = require.resolve(${JSON.stringify( + target, + )}, {paths: []});`, + 'utf-8', + ); + + await fs.writeFile(target, `module.exports = {}`, 'utf-8'); + + const runtime = await createRuntime(__filename); + const resolved = runtime.requireModule(runtime.__mockRootPath, entrypoint); + expect(resolved).toEqual(require.resolve(target, {paths: []})); + }); + it('resolves a module path with moduleNameMapper', async () => { const runtime = await createRuntime(__filename, { moduleNameMapper: {
[Bug]: `require.resolve` with empty `paths` option does not locate absolute paths ### Version 27.2.4 ### Steps to reproduce See repo README at https://github.com/ekelen/jest-require-resolve-empty-paths-array OR: 1. Clone reproduction repo linked above and change current directory to repo root. 2. Run `cp test.js /tmp && node -p "require.resolve('/tmp/test', { paths: [] } )"` (returns path of `tmp/test.js` at filesystem root). 3. Run `yarn install`. 4. Run `yarn test`. ### Expected behavior No error should be thrown when `require.resolve` is called with a valid module at user's filesystem root and an empty `paths` array option. The output of `require.resolve("/tmp/test", { paths: ["/"] })` should match the output of `require.resolve("/tmp/test", { paths: [] })`. ### Actual behavior If provided an empty paths array, `ModuleNotFoundError` is thrown: `Cannot resolve module '/tmp/test' from paths [''] from <repo_root>/test.js`. ### Additional context Appears to be a parallel issue to https://github.com/facebook/jest/issues/9502. ### Environment ```shell System: OS: macOS 11.5.2 CPU: (12) x64 Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz Binaries: Node: 16.8.0 - ~/.volta/tools/image/node/16.8.0/bin/node Yarn: 1.22.15 - ~/.volta/tools/image/yarn/1.22.15/bin/yarn npm: 7.21.0 - ~/.volta/tools/image/node/16.8.0/bin/npm npmPackages: jest: 27.2.4 => 27.2.4 ```
"2021-10-09T01:33:23Z"
27.2
[]
[ "packages/jest-runtime/src/__tests__/runtime_require_resolve.test.ts" ]
TypeScript
[]
[]
jestjs/jest
12,089
jestjs__jest-12089
[ "9534" ]
c7397485f11e27d0e03f647283c2c9d9b6ce33ea
diff --git a/packages/jest-mock/src/index.ts b/packages/jest-mock/src/index.ts --- a/packages/jest-mock/src/index.ts +++ b/packages/jest-mock/src/index.ts @@ -32,6 +32,77 @@ export type MockFunctionMetadata< length?: number; }; +export type MockableFunction = (...args: Array<any>) => any; +export type MethodKeysOf<T> = { + [K in keyof T]: T[K] extends MockableFunction ? K : never; +}[keyof T]; +export type PropertyKeysOf<T> = { + [K in keyof T]: T[K] extends MockableFunction ? never : K; +}[keyof T]; + +export type ArgumentsOf<T> = T extends (...args: infer A) => any ? A : never; + +export type ConstructorArgumentsOf<T> = T extends new (...args: infer A) => any + ? A + : never; +export type MaybeMockedConstructor<T> = T extends new ( + ...args: Array<any> +) => infer R + ? MockInstance<R, ConstructorArgumentsOf<T>> + : T; +export type MockedFunction<T extends MockableFunction> = MockWithArgs<T> & { + [K in keyof T]: T[K]; +}; +export type MockedFunctionDeep<T extends MockableFunction> = MockWithArgs<T> & + MockedObjectDeep<T>; +export type MockedObject<T> = MaybeMockedConstructor<T> & { + [K in MethodKeysOf<T>]: T[K] extends MockableFunction + ? MockedFunction<T[K]> + : T[K]; +} & {[K in PropertyKeysOf<T>]: T[K]}; +export type MockedObjectDeep<T> = MaybeMockedConstructor<T> & { + [K in MethodKeysOf<T>]: T[K] extends MockableFunction + ? MockedFunctionDeep<T[K]> + : T[K]; +} & {[K in PropertyKeysOf<T>]: MaybeMockedDeep<T[K]>}; + +export type MaybeMockedDeep<T> = T extends MockableFunction + ? MockedFunctionDeep<T> + : T extends object + ? MockedObjectDeep<T> + : T; + +export type MaybeMocked<T> = T extends MockableFunction + ? MockedFunction<T> + : T extends object + ? MockedObject<T> + : T; + +export type ArgsType<T> = T extends (...args: infer A) => any ? A : never; +export type Mocked<T> = { + [P in keyof T]: T[P] extends (...args: Array<any>) => any + ? MockInstance<ReturnType<T[P]>, ArgsType<T[P]>> + : T[P] extends Constructable + ? 
MockedClass<T[P]> + : T[P]; +} & T; +export type MockedClass<T extends Constructable> = MockInstance< + InstanceType<T>, + T extends new (...args: infer P) => any ? P : never +> & { + prototype: T extends {prototype: any} ? Mocked<T['prototype']> : never; +} & T; + +export interface Constructable { + new (...args: Array<any>): any; +} + +export interface MockWithArgs<T extends MockableFunction> + extends MockInstance<ReturnType<T>, ArgumentsOf<T>> { + new (...args: ConstructorArgumentsOf<T>): T; + (...args: ArgumentsOf<T>): ReturnType<T>; +} + export interface Mock<T, Y extends Array<unknown> = Array<unknown>> extends Function, MockInstance<T, Y> { @@ -1109,9 +1180,19 @@ export class ModuleMocker { private _typeOf(value: any): string { return value == null ? '' + value : typeof value; } + + // the typings test helper + mocked<T>(item: T, deep?: false): MaybeMocked<T>; + + mocked<T>(item: T, deep: true): MaybeMockedDeep<T>; + + mocked<T>(item: T, _deep = false): MaybeMocked<T> | MaybeMockedDeep<T> { + return item as any; + } } const JestMock = new ModuleMocker(global as unknown as typeof globalThis); export const fn = JestMock.fn.bind(JestMock); export const spyOn = JestMock.spyOn.bind(JestMock); +export const mocked = JestMock.mocked.bind(JestMock);
diff --git a/packages/jest-mock/src/__tests__/index.test.ts b/packages/jest-mock/src/__tests__/index.test.ts --- a/packages/jest-mock/src/__tests__/index.test.ts +++ b/packages/jest-mock/src/__tests__/index.test.ts @@ -9,7 +9,7 @@ /* eslint-disable local/ban-types-eventually, local/prefer-rest-params-eventually */ import vm, {Context} from 'vm'; -import {ModuleMocker, fn, spyOn} from '../'; +import {ModuleMocker, fn, mocked, spyOn} from '../'; describe('moduleMocker', () => { let moduleMocker: ModuleMocker; @@ -1452,6 +1452,13 @@ describe('moduleMocker', () => { }); }); +describe('mocked', () => { + it('should return unmodified input', () => { + const subject = {}; + expect(mocked(subject)).toBe(subject); + }); +}); + test('`fn` and `spyOn` do not throw', () => { expect(() => { fn();
Include ts-jest mock util functions in jest core ## πŸš€ Feature Proposal This proposal is for the outcome of the discussion in [ts-jest](https://github.com/kulshekhar/ts-jest/issues/1048) that includes`ts-jest` mock util functions in `jest` core. ## Motivation Users don't have to install `ts-jest` anymore to access these util mocks, just need to install `jest`. ## Pitch [Suggestion](https://github.com/kulshekhar/ts-jest/issues/1048#issuecomment-482606650) from @thymikee that it should belong to jest core
Yeah, I think this is a good idea. There's been some changes to mocks in `@types/jest` as well, would be nice to bring the best features of both into Jest itself (at which point DT could pull the features from us instead). https://github.com/DefinitelyTyped/DefinitelyTyped/blob/a68259ee8883cc9899463fe79a78bb49a9d2f6de/types/jest/index.d.ts#L1042-L1298 Hi, can I take this issue? Yes please! πŸ™‚ Hi @SimenB, I'm working on this issue and don't have much progress. I have some doubts about the approach that I should follow. Actually I didn't understand your first comment really well. Should I try to "use" definition on `@types/jest` on `jest-mock` package and work on top of it to add `mocked` fn from `ts-jest`? Has this been abandoned? I can take a look Include as well another proposal in this area from https://github.com/kulshekhar/ts-jest/issues/1065 got it, will do
"2021-11-25T17:22:57Z"
27.3
[]
[ "packages/jest-mock/src/__tests__/index.test.ts" ]
TypeScript
[]
[]
jestjs/jest
12,213
jestjs__jest-12213
[ "12200" ]
4eabd9d23ebf1c6a4170bca6defd9574d3d2092d
diff --git a/packages/jest-runtime/src/index.ts b/packages/jest-runtime/src/index.ts --- a/packages/jest-runtime/src/index.ts +++ b/packages/jest-runtime/src/index.ts @@ -1924,7 +1924,13 @@ export default class Runtime { }; const fn = this._moduleMocker.fn.bind(this._moduleMocker); const spyOn = this._moduleMocker.spyOn.bind(this._moduleMocker); - const mocked = this._moduleMocker.mocked.bind(this._moduleMocker); + const mocked = + this._moduleMocker.mocked?.bind(this._moduleMocker) ?? + (() => { + throw new Error( + 'Your test environment does not support `mocked`, please update it.', + ); + }); const setTimeout = (timeout: number) => { if (this._environment.global.jasmine) {
diff --git a/e2e/__tests__/testEnvironment.test.ts b/e2e/__tests__/testEnvironment.test.ts --- a/e2e/__tests__/testEnvironment.test.ts +++ b/e2e/__tests__/testEnvironment.test.ts @@ -5,9 +5,18 @@ * LICENSE file in the root directory of this source tree. */ -import {json as runWithJson} from '../runJest'; +import {tmpdir} from 'os'; +import * as path from 'path'; +import slash = require('slash'); +import {cleanup, createEmptyPackage, writeFiles} from '../Utils'; +import runJest, {json as runWithJson} from '../runJest'; import * as testFixturePackage from '../test-environment/package.json'; +const DIR = path.resolve(tmpdir(), 'test-env-no-mocked'); + +beforeEach(() => cleanup(DIR)); +afterAll(() => cleanup(DIR)); + it('respects testEnvironment docblock', () => { expect(testFixturePackage.jest.testEnvironment).toEqual('node'); @@ -16,3 +25,40 @@ it('respects testEnvironment docblock', () => { expect(result.success).toBe(true); expect(result.numTotalTests).toBe(3); }); + +it('handles missing `mocked` property', () => { + createEmptyPackage(DIR); + writeFiles(DIR, { + 'env.js': ` + const Node = require('${slash( + require.resolve('jest-environment-node'), + )}'); + + module.exports = class Thing extends Node { + constructor(...args) { + super(...args); + + this.moduleMocker.mocked = undefined; + } + }; + `, + 'test.js': ` + /** + * @jest-environment ./env.js + */ + + jest.mocked(); + + test('halla', () => { + expect(global.thing).toBe('nope'); + }); + `, + }); + + const {exitCode, stderr} = runJest(DIR); + + expect(exitCode).toBe(1); + expect(stderr).toContain( + 'Your test environment does not support `mocked`, please update it.', + ); +});
[Bug]: TypeError: Cannot read properties of undefined (reading 'bind') ### Version 27.4.5 ### Steps to reproduce 1. clone repo https://github.com/rainbow-industries/jest-bug 2. npm i 3. npm test 4. experience the bug ```bash npm test > [email protected] test > jest --config=jest.config.js FAIL ./demo.test.js ● Test suite failed to run TypeError: Cannot read properties of undefined (reading 'bind') at Runtime._createJestObjectFor (node_modules/jest-runtime/build/index.js:2193:46) Test Suites: 1 failed, 1 total Tests: 0 total Snapshots: 0 total Time: 0.372 s Ran all test suites ``` ### Expected behavior I expect the test not to fail since it just contains one console.log statement. ### Actual behavior The test fails with an error that looks very similar to this regression: https://github.com/facebook/jest/issues/12189#issuecomment-1001059449 ### Additional context I'd like to run test without transformations with ES Modules. That's the reason i use `jest-environment-jsdom-sixteen` as `testEnvironment` in the config. ### Environment ```shell System: OS: Linux 5.11 Ubuntu 20.04.3 LTS (Focal Fossa) CPU: (16) x64 AMD Ryzen 7 2700X Eight-Core Processor Binaries: Node: 16.13.1 - /usr/bin/node npm: 8.1.2 - /usr/bin/npm npmPackages: jest: ^27.4.5 => 27.4.5 ```
I have this exact error with jest 27.4.5. Add `"jest-environment-jsdom": "^27.4.4"` fix it. Reversing jest to 26 also fix it.
"2022-01-04T22:09:03Z"
27.4
[]
[ "e2e/__tests__/testEnvironment.test.ts" ]
TypeScript
[]
[]
jestjs/jest
12,322
jestjs__jest-12322
[ "8653" ]
d2f03eff4cb2bcff4d49f644a7f235879aed1bc6
diff --git a/packages/jest-changed-files/src/hg.ts b/packages/jest-changed-files/src/hg.ts --- a/packages/jest-changed-files/src/hg.ts +++ b/packages/jest-changed-files/src/hg.ts @@ -18,7 +18,7 @@ const adapter: SCMAdapter = { const args = ['status', '-amnu']; if (options.withAncestor) { - args.push('--rev', 'min((!public() & ::.)+.)^'); + args.push('--rev', 'first(min(!public() & ::.)^+.^)'); } else if (options.changedSince) { args.push('--rev', `ancestor(., ${options.changedSince})`); } else if (options.lastCommit === true) {
diff --git a/e2e/__tests__/jestChangedFiles.test.ts b/e2e/__tests__/jestChangedFiles.test.ts --- a/e2e/__tests__/jestChangedFiles.test.ts +++ b/e2e/__tests__/jestChangedFiles.test.ts @@ -362,14 +362,6 @@ test('handles a bad revision for "changedSince", for git', async () => { }); testIfHg('gets changed files for hg', async () => { - if (process.env.CI) { - // Circle and Travis have very old version of hg (v2, and current - // version is v4.2) and its API changed since then and not compatible - // any more. Changing the SCM version on CIs is not trivial, so we'll just - // skip this test and run it only locally. - return; - } - // file1.txt is used to make a multi-line commit message // with `hg commit -l file1.txt`. // This is done to ensure that `changedFiles` only returns files
Tests failing for mercurial changed files ## πŸ› Bug Report Currently, when cloning straight from [the current version `master`](https://github.com/facebook/jest/tree/d051b0da359c5930ab4c3ea69fe5524266622c96) [the test for getting changed files in `hg` is failing](https://github.com/facebook/jest/blob/d051b0da359c5930ab4c3ea69fe5524266622c96/e2e/__tests__/jestChangedFiles.test.ts#L263-L369). I'm running the latest version of `hg` (5.0.1) in an old Mac I have but the same happens in any of the other machines I've cloned jest into. The error I get is due to the pattern we use to specify a `revset` returning an empty revision range. ```bash Command failed: hg status -amnu --rev min((!public() & ::.)+.)^ /var/folders/p0/0npmk50s57v5sgzb_s9z2xmc0000gn/T/jest-changed-files-test-dir /var/folders/p0/0npmk50s57v5sgzb_s9z2xmc0000gn/T/jest-changed-files-test-dir/nested-dir /var/folders/p0/0npmk50s57v5sgzb_s9z2xmc0000gn/T/jest-changed-files-test-dir/nested-dir/second-nested-dir abort: empty revision range at makeError (node_modules/execa/index.js:181:11) ``` Related PRs: * https://github.com/facebook/jest/pull/7880 * https://github.com/facebook/jest/pull/8066 * https://github.com/facebook/jest/pull/5476 ## To Reproduce Steps to reproduce the behavior: 1. Clone `jest` 2. Install `hg` 3. Install dependencies 4. Run tests (`yarn test`) ## Expected behavior The test linked above should pass since the `withAncestor` option should return both the current changes and the changes in the last commit. Currently, the pattern we use to specify a `revset` errors because the pattern specifies an empty range. **I managed to solve this locally myself, by using `ancestors(.)::0` as the argument for `--revset`, but I'm not sure whether this solution covers all necessary edge-cases**. Since I don't use `mercurial` myself I thought it was better to open an issue and see what other mercurial users think of this solution. 
If this is a satisfactory solution I can open a PR as I already have these changes commited to [this branch](https://github.com/lucasfcosta/jest/tree/fix-withAncestor-hg-revset). ## Link to repl or repo (highly encouraged) As per the `steps to reproduce` section, this repo is its own reproducible repo. ## Run `npx envinfo --preset jest` Paste the results here: ```bash System: OS: macOS High Sierra 10.13.6 CPU: (4) x64 Intel(R) Core(TM) i5-5257U CPU @ 2.70GHz Binaries: Node: 10.16.0 - ~/.nvm/versions/node/v10.16.0/bin/node Yarn: 1.15.0 - /usr/local/bin/yarn npm: 6.9.0 - ~/.nvm/versions/node/v10.16.0/bin/npm ```
I have same problem. ``` $ git rev-parse HEAD e3f4c65140f08a2ec81e5a8260704c1d201e33c1 ``` ``` System: OS: macOS 10.15.1 CPU: (8) x64 Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz Binaries: Node: 12.13.0 - ~/.nvm/versions/node/v12.13.0/bin/node Yarn: 1.19.1 - /usr/local/bin/yarn npm: 6.12.0 - ~/.nvm/versions/node/v12.13.0/bin/npm ``` ``` $ hg --version Mercurial Distributed SCM (version 5.1.2) ``` ``` FAIL e2e/__tests__/jestChangedFiles.test.ts (26.869s) ● gets changed files for hg abort: empty revision range 334 | }); 335 | > 336 | ({changedFiles: files} = await getChangedFilesForRoots(roots, { | ^ 337 | withAncestor: true, 338 | })); 339 | // Returns files from current uncommitted state + the last commit at makeError (node_modules/execa/lib/error.js:58:11) at runMicrotasks (<anonymous>) at async Promise.all (index 0) at Object.<anonymous> (e2e/__tests__/jestChangedFiles.test.ts:336:28) FAIL e2e/__tests__/onlyChanged.test.ts (93.791s) ● gets changed files for hg expect(received).toMatch(expected) Expected pattern: /PASS __tests__(\/|\\)file2.test.js/ Received string: "Β· ● Test suite failed to runΒ· abort: empty revision range " 299 | 300 | ({stdout, stderr} = runJest(DIR, ['-o', '--changedFilesWithAncestor'])); > 301 | expect(stderr).toMatch(/PASS __tests__(\/|\\)file2.test.js/); | ^ 302 | expect(stderr).toMatch(/PASS __tests__(\/|\\)file3.test.js/); 303 | }); 304 | at Object.toMatch (e2e/__tests__/onlyChanged.test.ts:301:18) ``` Yeah, they fail for me as well. I've never used mercurial, so I have no real point of reference here. Maybe @scotthovestadt can help? @SimenB I'd be happy to open a PR if the solution I've described when I opened the issue is valid, but given that I'm not an SVN expert, I'd like to confirm before doing it: > I managed to solve this locally myself, by using ancestors(.)::0 as the argument for --revset, but I'm not sure whether this solution covers all necessary edge-cases. 
Since I don't use mercurial myself I thought it was better to open an issue and see what other mercurial users think of this solution. If this is a satisfactory solution I can open a PR as I already have these changes commited to this branch. I have absolutely no clue, unfortunately... Maybe @quark-zju knows? This is mostly likely related to phase configuration. In Mercurial, the common workflow is that the "master" branch has public commits, and feature branches have "draft" commits. I suspect all commits are "drafts" and there are commits that should be public but are not public in these cases. In that case, `hg phase --public master` will probably fix it. I think the revset can be changed to: first(min(!public() & ::.)^+.^) to work with incorrect phase setup (all commits are draft) while still being able to take advantage of phases to detect draft feature branch. Explanation: - `min(!public() & ::.)^`: This is the parent of the first commit of the feature branch, or an empty set if phases is not setup correctly. - `first(x+y)`: This picks `x` if `x` is not empty, or `y` if `x` is empty.
"2022-02-07T18:56:08Z"
28.0
[]
[ "e2e/__tests__/jestChangedFiles.test.ts" ]
TypeScript
[]
[]
jestjs/jest
12,531
jestjs__jest-12531
[ "12106" ]
ab7480498f2b013fa4cd94b80da9311f59a5f008
diff --git a/packages/jest-runtime/src/index.ts b/packages/jest-runtime/src/index.ts --- a/packages/jest-runtime/src/index.ts +++ b/packages/jest-runtime/src/index.ts @@ -1141,6 +1141,8 @@ export default class Runtime { this._esmoduleRegistry.clear(); this._cjsNamedExports.clear(); this._moduleMockRegistry.clear(); + this._cacheFS.clear(); + this._fileTransforms.clear(); if (this._environment) { if (this._environment.global) {
diff --git a/e2e/__tests__/clearFSAndTransformCache.test.ts b/e2e/__tests__/clearFSAndTransformCache.test.ts new file mode 100644 --- /dev/null +++ b/e2e/__tests__/clearFSAndTransformCache.test.ts @@ -0,0 +1,51 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +import {tmpdir} from 'os'; +import * as path from 'path'; +import {cleanup, writeFiles} from '../Utils'; +import runJest from '../runJest'; + +const dir = path.resolve(tmpdir(), 'clear_FS_and_transform_cache'); +const testFileContent = ` +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +const absoluteTestHelperFile = path.resolve(__dirname, './testHelper.js'); + +test('value is 1', () => { + const value = require('./testHelper'); + expect(value).toBe(1); +}); + +test('value is 1 after file is changed', () => { + fs.writeFileSync(absoluteTestHelperFile, 'module.exports = 2;'); + const value = require('./testHelper'); + expect(value).toBe(1); +}); + +test('value is 2 after calling "jest.resetModules"', () => { + jest.resetModules(); + const value = require('./testHelper'); + expect(value).toBe(2); +}); +`; + +beforeEach(() => cleanup(dir)); +afterAll(() => cleanup(dir)); + +test('clear FS and transform cache', () => { + writeFiles(dir, { + 'package.json': JSON.stringify({jest: {testEnvironment: 'node'}}), + 'test.js': testFileContent, + 'testHelper.js': 'module.exports = 1;', + }); + const {exitCode} = runJest(dir); + expect(exitCode).toBe(0); +});
[Bug]: failed to clear require cache ### Version 27.4.2 ### Steps to reproduce 1. clone my repo [email protected]:xkn1ght/jest-require.git 2. `pnpm install` 3. `pnpx jest ./index.spec.js` ### Expected behavior Expect to see require a totally different module, but I get the same value a = 1. However, the refenenceβ€˜s expect works, as the doc said: `https://jestjs.io/docs/jest-object#jestresetmodules` ### Actual behavior Should it be any way to clear require cache ### Additional context _No response_ ### Environment ```shell Node: 14.17.4 os: Darwin ```
I have the same problem. https://stackoverflow.com/questions/71001519/unable-to-invalidate-the-require-cache-jest-cache Currently, jest does not support this feature, but i think it could be supported.
"2022-03-02T03:35:23Z"
28.0
[]
[ "e2e/__tests__/clearFSAndTransformCache.test.ts" ]
TypeScript
[]
[]
jestjs/jest
12,674
jestjs__jest-12674
[ "12672" ]
1cce9a7dc194103576fad6c2086052efbea5ae77
diff --git a/packages/jest-mock/src/index.ts b/packages/jest-mock/src/index.ts --- a/packages/jest-mock/src/index.ts +++ b/packages/jest-mock/src/index.ts @@ -971,8 +971,12 @@ export class ModuleMocker { metadata.value = component; return metadata; } else if (type === 'function') { - // @ts-expect-error component is a function so it has a name - metadata.name = component.name; + // @ts-expect-error component is a function so it has a name, but not + // necessarily a string: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/name#function_names_in_classes + const componentName = component.name; + if (typeof componentName === 'string') { + metadata.name = componentName; + } if (this.isMockFunction(component)) { metadata.mockImpl = component.getMockImplementation() as T; }
diff --git a/packages/jest-mock/src/__tests__/index.test.ts b/packages/jest-mock/src/__tests__/index.test.ts --- a/packages/jest-mock/src/__tests__/index.test.ts +++ b/packages/jest-mock/src/__tests__/index.test.ts @@ -30,6 +30,17 @@ describe('moduleMocker', () => { expect(metadata.name).toBe('x'); }); + it('does not return broken name property', () => { + class By { + static name() { + return 'this is not a name'; + } + } + const metadata = moduleMocker.getMetadata(By); + expect(typeof By.name).toBe('function'); + expect(metadata).not.toHaveProperty('name'); + }); + it('mocks constant values', () => { const metadata = moduleMocker.getMetadata(Symbol.for('bowties.are.cool')); expect(metadata.value).toEqual(Symbol.for('bowties.are.cool'));
[Bug]: ### Version 27.5.1 ### Steps to reproduce 1. Clone repo at https://github.com/kpeters-cbsi/jest-fails-to-mock-selenium-webdriver 2. `yarn install` 3. `yarn test` ### Expected behavior I expect the test to complete successfully. ### Actual behavior ``` FAIL test/index.ts ● Test suite failed to run TypeError: name.startsWith is not a function at ModuleMocker._createMockFunction (node_modules/jest-mock/build/index.js:594:22) at Array.forEach (<anonymous>) ``` ### Additional context I did a bit of debugging on my own, and I think this is happening in [_createMockFunction](https://github.com/facebook/jest/blob/67c1aa20c5fec31366d733e901fee2b981cb1850/packages/jest-mock/src/index.ts#L810): ``` if (name && name.startsWith(boundFunctionPrefix)) { <--- HERE do { name = name.substring(boundFunctionPrefix.length); // Call bind() just to alter the function name. bindCall = '.bind(null)'; } while (name && name.startsWith(boundFunctionPrefix)); } ``` Something in `selenium-webdriver` has a `name` property that's a function, rather than a string, which is causing that error. I wasn't able to figure out which class in `selenium-webdriver` was the culprit, however. ### Environment ```shell System: OS: macOS 11.6 CPU: (12) x64 Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz Binaries: Node: 14.19.0 - ~/.nvm/versions/node/v14.19.0/bin/node Yarn: 3.2.0 - ~/.nvm/versions/node/v14.19.0/bin/yarn npm: 8.5.5 - ~/.nvm/versions/node/v14.19.0/bin/npm npmPackages: jest: ^27.5.1 => 27.5.1 ```
This is due to https://github.com/SeleniumHQ/selenium/blob/8b53734fd2bc97f7d084dc8326bf4b6c47b7052e/javascript/node/selenium-webdriver/lib/by.js#L205-L207 messing with our logic for gettting the name of a function here: https://github.com/facebook/jest/blob/1cce9a7dc194103576fad6c2086052efbea5ae77/packages/jest-mock/src/index.ts#L974-L975. It looks weird in Node's REPL as well. <img width="327" alt="image" src="https://user-images.githubusercontent.com/1404810/163550046-5dfbf788-617d-48f1-bdf6-64cdde310bda.png"> <img width="363" alt="image" src="https://user-images.githubusercontent.com/1404810/163550112-492e1392-3c68-455b-8867-6c3aeab4202a.png"> It is also an error in typescript: <img width="699" alt="image" src="https://user-images.githubusercontent.com/1404810/163550624-31b45e86-6191-4dbd-90ea-893e1d35f509.png"> https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/name#function_names_in_classes
"2022-04-15T09:09:48Z"
28.0
[]
[ "packages/jest-mock/src/__tests__/index.test.ts" ]
TypeScript
[]
[]
jestjs/jest
12,763
jestjs__jest-12763
[ "12752" ]
c8c32d3704469557ecfc3d4a9e81002f11e2075d
diff --git a/packages/expect-utils/package.json b/packages/expect-utils/package.json --- a/packages/expect-utils/package.json +++ b/packages/expect-utils/package.json @@ -20,6 +20,7 @@ "jest-get-type": "^28.0.2" }, "devDependencies": { + "immutable": "^4.0.0", "jest-matcher-utils": "^28.0.2" }, "engines": { diff --git a/packages/expect-utils/src/jasmineUtils.ts b/packages/expect-utils/src/jasmineUtils.ts --- a/packages/expect-utils/src/jasmineUtils.ts +++ b/packages/expect-utils/src/jasmineUtils.ts @@ -238,9 +238,10 @@ function isDomNode(obj: any): boolean { ); } -// SENTINEL constants are from https://github.com/facebook/immutable-js +// SENTINEL constants are from https://github.com/immutable-js/immutable-js/tree/main/src/predicates const IS_KEYED_SENTINEL = '@@__IMMUTABLE_KEYED__@@'; const IS_SET_SENTINEL = '@@__IMMUTABLE_SET__@@'; +const IS_LIST_SENTINEL = '@@__IMMUTABLE_LIST__@@'; const IS_ORDERED_SENTINEL = '@@__IMMUTABLE_ORDERED__@@'; export function isImmutableUnorderedKeyed(maybeKeyed: any) { @@ -258,3 +259,10 @@ export function isImmutableUnorderedSet(maybeSet: any) { !maybeSet[IS_ORDERED_SENTINEL] ); } + +export function isImmutableList(maybeList: any) { + return !!( + maybeList && + maybeList[IS_LIST_SENTINEL] + ); +} diff --git a/packages/expect-utils/src/utils.ts b/packages/expect-utils/src/utils.ts --- a/packages/expect-utils/src/utils.ts +++ b/packages/expect-utils/src/utils.ts @@ -10,6 +10,7 @@ import {isPrimitive} from 'jest-get-type'; import { equals, isA, + isImmutableList, isImmutableUnorderedKeyed, isImmutableUnorderedSet, } from './jasmineUtils'; @@ -254,10 +255,12 @@ export const iterableEquality = ( return false; } - const aEntries = Object.entries(a); - const bEntries = Object.entries(b); - if (!equals(aEntries, bEntries)) { - return false; + if (!isImmutableList(a)) { + const aEntries = Object.entries(a); + const bEntries = Object.entries(b); + if (!equals(aEntries, bEntries)) { + return false; + } } // Remove the first value from the 
stack of traversed values.
diff --git a/packages/expect-utils/src/__tests__/utils.test.ts b/packages/expect-utils/src/__tests__/utils.test.ts --- a/packages/expect-utils/src/__tests__/utils.test.ts +++ b/packages/expect-utils/src/__tests__/utils.test.ts @@ -6,6 +6,7 @@ * */ +import {List} from 'immutable'; import {stringify} from 'jest-matcher-utils'; import { arrayBufferEquality, @@ -517,6 +518,13 @@ describe('iterableEquality', () => { expect(iterableEquality(a, b)).toBe(true); }); + + test('returns true when given Immutable Lists without an OwnerID', () => { + const a = List([1, 2, 3]); + const b = a.filter(v => v > 0); + + expect(iterableEquality(a, b)).toBe(true); + }); }); describe('arrayBufferEquality', () => {
[Bug]: Jest 28 fails on Immutable.List().filter toEqual comparison ### Version 28.0.1 ### Steps to reproduce 1. Install latest `immutable` - (e.g. `yarn add immutable@^4.0.0`). 2. Create the following test file: ```js const { List } = require("immutable"); const numbers = List([1, 2]); describe("test", () => { it("fails in Jest 28.0.1, passes in Jest 27.5.1", () => { const twos = numbers.filter((num) => num === 2); expect(twos).toEqual(List([2])); }); }); ``` 3. Run the file in Jest 28.0.1. ### Expected behavior I expect the test to pass, as it does in Jest 27.5.1 ### Actual behavior The test fails with the following error: ``` FAIL ./example.test.js test βœ• fails in Jest 28.0.1, passes in Jest 27.5.1 (6 ms) ● test β€Ί fails in Jest 28.0.1, passes in Jest 27.5.1 expect(received).toEqual(expected) // deep equality Expected: Immutable.List [2] Received: serializes to the same string 5 | it("fails in Jest 28.0.1, passes in Jest 27.5.1", () => { 6 | const evens = numbers.filter((num) => num === 2); > 7 | expect(evens).toEqual(List([2])); | ^ 8 | }); 9 | }); 10 | at Object.toEqual (example.test.js:7:17) ``` ### Additional context I see the same issue with `immutable@^3` as well. Other deep equality comparisons with ImmutableJS objects that you'd expect to pass, do pass in Jest 28: ```js const { List, Map } = require("immutable"); it("passes1", () => { expect(List([2])).toEqual(List([2])); }); it("passes2", () => { expect(Map({ hello: "world" })).toEqual(Map({ hello: "world" })); }); it("passes3", () => { expect(List([Map({ hello: "world" }), Map({ foo: "bar" })])).toEqual( List([Map({ hello: "world" }), Map({ foo: "bar" })]) ); }); ``` The only reliable one I can reproduce is when a `List` gets `filter`'d. 
### Environment ```shell System: OS: macOS 10.15.7 CPU: (16) x64 Intel(R) Core(TM) i9-9880H CPU @ 2.30GHz Binaries: Node: 16.14.2 - ~/.nvm/versions/node/v16.14.2/bin/node Yarn: 1.22.18 - ~/.nvm/versions/node/v16.14.2/bin/yarn npm: 8.5.0 - ~/.nvm/versions/node/v16.14.2/bin/npm npmPackages: jest: ^27 => 27.5.1 ```
Bisecting, this was introduced in #8359. @romellem would you be able to provide a PR fixing this? πŸ™‚ I can definitely take a look. Thanks for tracking down the offending commit, I'll do some digging and see what [filter][1] is doing with `entries` compared a regular `List`. Any ideas that are coming to mind? [1]: https://immutable-js.com/docs/v4.0.0/List/#filter() Had some time to dig in, the problem is `.filter` (maybe other Collection methods too?) sets an `ownerID` on the `_tail` property whereas a direct `List()` does not. Since Lists are iterable, this path gets hit and eventually gets flagged as a difference. <table><tr><td><code>List([1]).filter(v => v === 1)</code></td><td><code>List([1])</code></td></tr> <tr><td><img width="300" src="https://user-images.githubusercontent.com/8504000/165560503-dea2919b-5356-4403-98d8-7bd9db815cc5.png" alt="filtered"></td><td> <img width="300" src="https://user-images.githubusercontent.com/8504000/165560520-bc14d590-08d3-4fec-8ead-95557ac4b9a4.png" alt="direct"></td></tr></table> I see there are checks for [`isImmutableUnorderedSet()`](https://github.com/facebook/jest/blob/27eb77e86f388ee88fd3badb7ce6e872d82405ad/packages/expect-utils/src/utils.ts#L183) and [`isImmutableUnorderedKeyed()`](https://github.com/facebook/jest/blob/27eb77e86f388ee88fd3badb7ce6e872d82405ad/packages/expect-utils/src/utils.ts#L205). Could a fix for this be to check if the object `isList` via its [`'@@__IMMUTABLE_LIST__@@'`](https://github.com/immutable-js/immutable-js/blob/f2dec5aa72c8ce0d893fa1b0eebe67f254ee2034/src/predicates/isList.js#L1) key, and check its iterable values similar to what happens with a `Set`? That sounds reasonable to me!
"2022-04-28T04:40:27Z"
28.0
[]
[ "packages/expect-utils/src/__tests__/utils.test.ts" ]
TypeScript
[]
[]
jestjs/jest
12,899
jestjs__jest-12899
[ "12860" ]
6277505d953c9d8fef938d6a113bba86c38b72f1
diff --git a/packages/expect-utils/src/jasmineUtils.ts b/packages/expect-utils/src/jasmineUtils.ts --- a/packages/expect-utils/src/jasmineUtils.ts +++ b/packages/expect-utils/src/jasmineUtils.ts @@ -266,3 +266,11 @@ export function isImmutableList(maybeList: any) { maybeList[IS_LIST_SENTINEL] ); } + +export function isImmutableOrderedKeyed(maybeKeyed: any) { + return !!( + maybeKeyed && + maybeKeyed[IS_KEYED_SENTINEL] && + maybeKeyed[IS_ORDERED_SENTINEL] + ); +} diff --git a/packages/expect-utils/src/utils.ts b/packages/expect-utils/src/utils.ts --- a/packages/expect-utils/src/utils.ts +++ b/packages/expect-utils/src/utils.ts @@ -11,6 +11,7 @@ import { equals, isA, isImmutableList, + isImmutableOrderedKeyed, isImmutableUnorderedKeyed, isImmutableUnorderedSet, } from './jasmineUtils'; @@ -255,7 +256,7 @@ export const iterableEquality = ( return false; } - if (!isImmutableList(a)) { + if (!isImmutableList(a) && !isImmutableOrderedKeyed(a)) { const aEntries = Object.entries(a); const bEntries = Object.entries(b); if (!equals(aEntries, bEntries)) {
diff --git a/packages/expect-utils/src/__tests__/utils.test.ts b/packages/expect-utils/src/__tests__/utils.test.ts --- a/packages/expect-utils/src/__tests__/utils.test.ts +++ b/packages/expect-utils/src/__tests__/utils.test.ts @@ -6,7 +6,7 @@ * */ -import {List} from 'immutable'; +import {List, OrderedMap} from 'immutable'; import {stringify} from 'jest-matcher-utils'; import { arrayBufferEquality, @@ -525,6 +525,12 @@ describe('iterableEquality', () => { expect(iterableEquality(a, b)).toBe(true); }); + + test('returns true when given Immutable OrderedMaps without an OwnerID', () => { + const a = OrderedMap().set('saving', true); + const b = OrderedMap().merge({saving: true}); + expect(iterableEquality(a, b)).toBe(true); + }); }); describe('arrayBufferEquality', () => {
[Bug]: Another Immutable equals comparison failure ### Version 28.1.0 ### Steps to reproduce ``` import Immutable from 'immutable'; it('works with immutable objects', () => { const a = Immutable.OrderedMap().set('saving', true); const b = Immutable.OrderedMap().merge({ saving: true }); console.log(a); console.log(b); expect(Immutable.is(a, b)).toBe(true); expect(a).toEqual(b); }); ``` ### Expected behavior The test should pass ### Actual behavior The `isEqual` test fails. The `Immutable.is(a, b)` assertion passes. ### Additional context This is possibly the same issue as #12752 that wasn't addressed in that fix. The call to `merge` sets an `ownerID` value on the `_root` property whereas the `set` call does not. ### Environment ```shell System: OS: macOS 12.4 CPU: (10) x64 Apple M1 Pro Binaries: Node: 14.18.1 - /var/folders/b4/0jgrn8fx6vn5bd59pmybg6zw0000gn/T/fnm_multishells/4762_1652895535386/bin/node Yarn: 1.22.18 - ~/workspace/zenpayroll/node_modules/.bin/yarn npm: 8.3.1 - /opt/homebrew/bin/npm npmPackages: jest: ^28.1.0 => 28.1.0 ```
PR welcome πŸ˜€
"2022-06-01T22:06:54Z"
28.1
[]
[ "packages/expect-utils/src/__tests__/utils.test.ts" ]
TypeScript
[]
[]
jestjs/jest
12,977
jestjs__jest-12977
[ "12976" ]
93e115035057c4fdbf5466211e102f60bb08a6e6
diff --git a/packages/expect-utils/src/jasmineUtils.ts b/packages/expect-utils/src/jasmineUtils.ts --- a/packages/expect-utils/src/jasmineUtils.ts +++ b/packages/expect-utils/src/jasmineUtils.ts @@ -274,3 +274,12 @@ export function isImmutableOrderedKeyed(maybeKeyed: any) { maybeKeyed[IS_ORDERED_SENTINEL] ); } + + +export function isImmutableOrderedSet(maybeSet: any) { + return !!( + maybeSet && + maybeSet[IS_SET_SENTINEL] && + maybeSet[IS_ORDERED_SENTINEL] + ); +} diff --git a/packages/expect-utils/src/utils.ts b/packages/expect-utils/src/utils.ts --- a/packages/expect-utils/src/utils.ts +++ b/packages/expect-utils/src/utils.ts @@ -12,6 +12,7 @@ import { isA, isImmutableList, isImmutableOrderedKeyed, + isImmutableOrderedSet, isImmutableUnorderedKeyed, isImmutableUnorderedSet, } from './jasmineUtils'; @@ -256,7 +257,11 @@ export const iterableEquality = ( return false; } - if (!isImmutableList(a) && !isImmutableOrderedKeyed(a)) { + if ( + !isImmutableList(a) && + !isImmutableOrderedKeyed(a) && + !isImmutableOrderedSet(a) + ) { const aEntries = Object.entries(a); const bEntries = Object.entries(b); if (!equals(aEntries, bEntries)) {
diff --git a/packages/expect-utils/src/__tests__/utils.test.ts b/packages/expect-utils/src/__tests__/utils.test.ts --- a/packages/expect-utils/src/__tests__/utils.test.ts +++ b/packages/expect-utils/src/__tests__/utils.test.ts @@ -6,7 +6,7 @@ * */ -import {List, OrderedMap} from 'immutable'; +import {List, OrderedMap, OrderedSet} from 'immutable'; import {stringify} from 'jest-matcher-utils'; import { arrayBufferEquality, @@ -531,6 +531,12 @@ describe('iterableEquality', () => { const b = OrderedMap().merge({saving: true}); expect(iterableEquality(a, b)).toBe(true); }); + + test('returns true when given Immutable OrderedSets without an OwnerID', () => { + const a = OrderedSet().add('newValue'); + const b = List(['newValue']).toOrderedSet(); + expect(iterableEquality(a, b)).toBe(true); + }); }); describe('arrayBufferEquality', () => {
[Bug]: Immutable.OrderedSet equality comparison failure ### Version 28.1.2 ### Steps to reproduce ``` import { OrderedSet, List } from 'immutable'; it('works with immutable objects', () => { const a = OrderedSet().add('newValue'); const b = List(['newValue']).toOrderedSet(); expect(Immutable.is(a, b)).toBe(true); // passes expect(a).toEqual(b); // fails }); ``` ### Expected behavior The test should pass ### Actual behavior The `isEqual` test fails. The `Immutable.is(a, b)` assertion passes. ### Additional context This is the same issue as #12860, but needs its own condition. ### Environment ```shell System: OS: macOS 12.4 CPU: (10) x64 Apple M1 Pro Binaries: Node: 14.18.1 - /var/folders/b4/0jgrn8fx6vn5bd59pmybg6zw0000gn/T/fnm_multishells/4762_1652895535386/bin/node Yarn: 1.22.18 - ~/workspace/zenpayroll/node_modules/.bin/yarn npm: 8.3.1 - /opt/homebrew/bin/npm npmPackages: jest: ^28.1.2 => 28.1.2 ```
"2022-06-29T22:32:32Z"
28.1
[]
[ "packages/expect-utils/src/__tests__/utils.test.ts" ]
TypeScript
[]
[]
jestjs/jest
13,055
jestjs__jest-13055
[ "13051" ]
837af9eab05d6c8cbaa03a2528bce05c050280c5
diff --git a/packages/expect-utils/src/jasmineUtils.ts b/packages/expect-utils/src/jasmineUtils.ts --- a/packages/expect-utils/src/jasmineUtils.ts +++ b/packages/expect-utils/src/jasmineUtils.ts @@ -243,6 +243,7 @@ const IS_KEYED_SENTINEL = '@@__IMMUTABLE_KEYED__@@'; const IS_SET_SENTINEL = '@@__IMMUTABLE_SET__@@'; const IS_LIST_SENTINEL = '@@__IMMUTABLE_LIST__@@'; const IS_ORDERED_SENTINEL = '@@__IMMUTABLE_ORDERED__@@'; +const IS_RECORD_SYMBOL = '@@__IMMUTABLE_RECORD__@@'; export function isImmutableUnorderedKeyed(maybeKeyed: any) { return !!( @@ -283,3 +284,10 @@ export function isImmutableOrderedSet(maybeSet: any) { maybeSet[IS_ORDERED_SENTINEL] ); } + +export function isImmutableRecord(maybeSet: any) { + return !!( + maybeSet && + maybeSet[IS_RECORD_SYMBOL] + ); +} \ No newline at end of file diff --git a/packages/expect-utils/src/utils.ts b/packages/expect-utils/src/utils.ts --- a/packages/expect-utils/src/utils.ts +++ b/packages/expect-utils/src/utils.ts @@ -13,6 +13,7 @@ import { isImmutableList, isImmutableOrderedKeyed, isImmutableOrderedSet, + isImmutableRecord, isImmutableUnorderedKeyed, isImmutableUnorderedSet, } from './jasmineUtils'; @@ -260,7 +261,8 @@ export const iterableEquality = ( if ( !isImmutableList(a) && !isImmutableOrderedKeyed(a) && - !isImmutableOrderedSet(a) + !isImmutableOrderedSet(a) && + !isImmutableRecord(a) ) { const aEntries = Object.entries(a); const bEntries = Object.entries(b);
diff --git a/packages/expect-utils/src/__tests__/utils.test.ts b/packages/expect-utils/src/__tests__/utils.test.ts --- a/packages/expect-utils/src/__tests__/utils.test.ts +++ b/packages/expect-utils/src/__tests__/utils.test.ts @@ -6,7 +6,7 @@ * */ -import {List, OrderedMap, OrderedSet} from 'immutable'; +import {List, OrderedMap, OrderedSet, Record} from 'immutable'; import {stringify} from 'jest-matcher-utils'; import { arrayBufferEquality, @@ -537,6 +537,13 @@ describe('iterableEquality', () => { const b = List(['newValue']).toOrderedSet(); expect(iterableEquality(a, b)).toBe(true); }); + + test('returns true when given Immutable Record without an OwnerID', () => { + class TestRecord extends Record({dummy: ''}) {} + const a = new TestRecord().merge({dummy: 'data'}); + const b = new TestRecord().set('dummy', 'data'); + expect(iterableEquality(a, b)).toBe(true); + }); }); describe('arrayBufferEquality', () => {
[Bug]: Immutable.Record equality comparison failure ### Version 28.1.3 ### Steps to reproduce 1. Install latest Immutable.js(v4.1.0) and jest. - e.g. `yarn install immutable jest` 2. Create the following test file and run the test. ```javascript const Immutable = require("immutable"); it("passed in jest 27.5.1, but failure in jest 28.1.3", () => { class MyRecord extends Immutable.Record({ dummy: "" }) {} const a = new MyRecord().merge({ dummy: "data" }); const b = new MyRecord().set("dummy", "data"); expect(Immutable.is(a, b)).toBe(true); expect(a).toEqual(b); }); ``` ### Expected behavior I expect to pass the test. ### Actual behavior The `isEqual` test fails. ``` ● passed in jest 27.5.1, but failure in jest 28.1.3 expect(received).toEqual(expected) // deep equality Expected: Immutable.Record {"dummy": "data"} Received: serializes to the same string 9 | console.log(b._values._tail.ownerID); 10 | expect(Immutable.is(a, b)).toBe(true); > 11 | expect(a).toEqual(b); | ^ 12 | }); 13 | ``` ### Additional context This issue is similar to #12752, #12860, and #12976. The reason to fail may be same these issues. Immutable.Record has `values` of Immutable.List. The test fails because change `values._tail.ownerID` by the operation. ``` console.log a._values._tail.ownerID: OwnerID {} at Object.log (__tests__/index.test.js:8:11) console.log b._values._tail.ownerID: undefined at Object.log (__tests__/index.test.js:9:11) ``` ### Environment ```shell System: OS: macOS 12.4 CPU: (8) x64 Intel(R) Core(TM) i7-1068NG7 CPU @ 2.30GHz Binaries: Node: 16.13.0 - ~/.nodenv/versions/16.13.0/bin/node Yarn: 1.22.17 - ~/.nodenv/versions/16.13.0/bin/yarn npm: 8.1.4 - ~/.nodenv/versions/16.13.0/bin/npm npmPackages: jest: ^28.1.3 => 28.1.3 ```
"2022-07-22T07:16:21Z"
29.0
[]
[ "packages/expect-utils/src/__tests__/utils.test.ts" ]
TypeScript
[]
[]
jestjs/jest
13,866
jestjs__jest-13866
[ "13808" ]
dc0ac7580f9ab1a7e8aa108b206f1eaa2f8e2b15
diff --git a/packages/jest-mock/src/index.ts b/packages/jest-mock/src/index.ts --- a/packages/jest-mock/src/index.ts +++ b/packages/jest-mock/src/index.ts @@ -258,6 +258,8 @@ type MockFunctionConfig = { specificMockImpls: Array<Function>; }; +type SpyState = {reset?: () => void; restore: () => void}; + const MOCK_CONSTRUCTOR_NAME = 'mockConstructor'; const FUNCTION_NAME_RESERVED_PATTERN = /[\s!-/:-@[-`{-~]/; @@ -506,9 +508,8 @@ export class ModuleMocker { private readonly _environmentGlobal: typeof globalThis; private _mockState: WeakMap<Mock, MockFunctionState>; private _mockConfigRegistry: WeakMap<Function, MockFunctionConfig>; - private _spyState: Set<() => void>; + private _spyState: Set<SpyState>; private _invocationCallCounter: number; - private _originalFn: WeakMap<Mock, Function>; /** * @see README.md @@ -521,7 +522,6 @@ export class ModuleMocker { this._mockConfigRegistry = new WeakMap(); this._spyState = new Set(); this._invocationCallCounter = 1; - this._originalFn = new WeakMap(); } private _getSlots(object?: Record<string, any>): Array<string> { @@ -613,27 +613,27 @@ export class ModuleMocker { private _makeComponent<T extends Record<string, any>>( metadata: MockMetadata<T, 'object'>, - restore?: () => void, + spyState?: SpyState, ): T; private _makeComponent<T extends Array<unknown>>( metadata: MockMetadata<T, 'array'>, - restore?: () => void, + spyState?: SpyState, ): T; private _makeComponent<T extends RegExp>( metadata: MockMetadata<T, 'regexp'>, - restore?: () => void, + spyState?: SpyState, ): T; private _makeComponent<T>( metadata: MockMetadata<T, 'constant' | 'collection' | 'null' | 'undefined'>, - restore?: () => void, + spyState?: SpyState, ): T; private _makeComponent<T extends UnknownFunction>( metadata: MockMetadata<T, 'function'>, - restore?: () => void, + spyState?: SpyState, ): Mock<T>; private _makeComponent<T extends UnknownFunction>( metadata: MockMetadata<T>, - restore?: () => void, + spyState?: SpyState, ): Record<string, any> | 
Array<unknown> | RegExp | T | Mock | undefined { if (metadata.type === 'object') { return new this._environmentGlobal.Object(); @@ -754,8 +754,8 @@ export class ModuleMocker { f._isMockFunction = true; f.getMockImplementation = () => this._ensureMockConfig(f).mockImpl as T; - if (typeof restore === 'function') { - this._spyState.add(restore); + if (spyState != null) { + this._spyState.add(spyState); } this._mockState.set(f, this._defaultMockState()); @@ -775,18 +775,23 @@ export class ModuleMocker { f.mockReset = () => { f.mockClear(); - const originalFn = this._originalFn.get(f); - const originalMockImpl = { - ...this._defaultMockConfig(), - mockImpl: originalFn, - }; - this._mockConfigRegistry.set(f, originalMockImpl); + this._mockConfigRegistry.delete(f); + + if (spyState != null) { + spyState.reset?.(); + } + return f; }; f.mockRestore = () => { - f.mockReset(); - return restore ? restore() : undefined; + f.mockClear(); + this._mockConfigRegistry.delete(f); + + if (spyState != null) { + spyState.restore(); + this._spyState.delete(spyState); + } }; f.mockReturnValueOnce = (value: ReturnType<T>) => @@ -994,14 +999,14 @@ export class ModuleMocker { T extends object, K extends PropertyLikeKeys<T>, >(object: T, propertyKey: K): ReplacedPropertyRestorer<T, K> | undefined { - for (const spyState of this._spyState) { + for (const {restore} of this._spyState) { if ( - 'object' in spyState && - 'property' in spyState && - spyState.object === object && - spyState.property === propertyKey + 'object' in restore && + 'property' in restore && + restore.object === object && + restore.property === propertyKey ) { - return spyState as ReplacedPropertyRestorer<T, K>; + return restore as ReplacedPropertyRestorer<T, K>; } } @@ -1117,6 +1122,15 @@ export class ModuleMocker { return fn; } + private _attachMockImplementation<T extends Function>( + mock: Mock<UnknownFunction>, + original: T, + ) { + mock.mockImplementation(function (this: unknown) { + return original.apply(this, 
arguments); + }); + } + spyOn< T extends object, K extends PropertyLikeKeys<T>, @@ -1202,29 +1216,43 @@ export class ModuleMocker { if (descriptor && descriptor.get) { const originalGet = descriptor.get; - mock = this._makeComponent({type: 'function'}, () => { - descriptor!.get = originalGet; - Object.defineProperty(object, methodKey, descriptor!); - }); + mock = this._makeComponent( + {type: 'function'}, + { + reset: () => { + this._attachMockImplementation(mock, original); + }, + restore: () => { + descriptor!.get = originalGet; + Object.defineProperty(object, methodKey, descriptor!); + }, + }, + ); descriptor.get = () => mock; Object.defineProperty(object, methodKey, descriptor); } else { - mock = this._makeComponent({type: 'function'}, () => { - if (isMethodOwner) { - object[methodKey] = original; - } else { - delete object[methodKey]; - } - }); - // @ts-expect-error overriding original method with a Mock + mock = this._makeComponent( + {type: 'function'}, + { + reset: () => { + this._attachMockImplementation(mock, original); + }, + restore: () => { + if (isMethodOwner) { + object[methodKey] = original; + } else { + delete object[methodKey]; + } + }, + }, + ); + // @ts-expect-error: overriding original method with a mock object[methodKey] = mock; } - mock.mockImplementation(function (this: unknown) { - return original.apply(this, arguments); - }); + this._attachMockImplementation(mock, original); } - this._originalFn.set(object[methodKey] as Mock, original); + return object[methodKey] as Mock; } @@ -1276,18 +1304,24 @@ export class ModuleMocker { ); } - descriptor[accessType] = this._makeComponent({type: 'function'}, () => { - // @ts-expect-error: mock is assignable - descriptor![accessType] = original; - Object.defineProperty(object, propertyKey, descriptor!); - }); + descriptor[accessType] = this._makeComponent( + {type: 'function'}, + { + reset: () => { + this._attachMockImplementation( + descriptor![accessType] as Mock, + original, + ); + }, + restore: () 
=> { + // @ts-expect-error: overriding original method with a mock + descriptor![accessType] = original; + Object.defineProperty(object, propertyKey, descriptor!); + }, + }, + ); - (descriptor[accessType] as Mock).mockImplementation(function ( - this: unknown, - ) { - // @ts-expect-error - wrong context - return original.apply(this, arguments); - }); + this._attachMockImplementation(descriptor[accessType] as Mock, original); } Object.defineProperty(object, propertyKey, descriptor); @@ -1392,7 +1426,7 @@ export class ModuleMocker { restore: () => { restore(); - this._spyState.delete(restore); + this._spyState.delete({restore}); }, }; @@ -1400,7 +1434,7 @@ export class ModuleMocker { restore.property = propertyKey; restore.replaced = replaced; - this._spyState.add(restore); + this._spyState.add({restore}); return replaced.replaceValue(value); } @@ -1410,14 +1444,15 @@ export class ModuleMocker { } resetAllMocks(): void { - this._spyState.forEach(reset => reset()); + this.clearAllMocks(); this._mockConfigRegistry = new WeakMap(); - this._mockState = new WeakMap(); + this._spyState.forEach(spyState => spyState.reset?.()); } restoreAllMocks(): void { - this._mockState = new WeakMap(); - this._spyState.forEach(restore => restore()); + this.clearAllMocks(); + this._mockConfigRegistry = new WeakMap(); + this._spyState.forEach(spyState => spyState.restore()); this._spyState = new Set(); }
diff --git a/packages/jest-mock/src/__tests__/index.test.ts b/packages/jest-mock/src/__tests__/index.test.ts --- a/packages/jest-mock/src/__tests__/index.test.ts +++ b/packages/jest-mock/src/__tests__/index.test.ts @@ -1247,30 +1247,6 @@ describe('moduleMocker', () => { expect(fn.getMockName()).toBe('jest.fn()'); }); - test('after mock reset, the object should return to its original value', () => { - const myObject = {bar: () => 'bar'}; - - const barStub = moduleMocker.spyOn(myObject, 'bar'); - - barStub.mockReturnValue('POTATO!'); - expect(myObject.bar()).toBe('POTATO!'); - barStub.mockReset(); - - expect(myObject.bar()).toBe('bar'); - }); - - test('after resetAllMocks, the object should return to its original value', () => { - const myObject = {bar: () => 'bar'}; - - const barStub = moduleMocker.spyOn(myObject, 'bar'); - - barStub.mockReturnValue('POTATO!'); - expect(myObject.bar()).toBe('POTATO!'); - moduleMocker.resetAllMocks(); - - expect(myObject.bar()).toBe('bar'); - }); - test('mockName gets reset by mockRestore', () => { const fn = jest.fn(); expect(fn.getMockName()).toBe('jest.fn()'); @@ -1344,6 +1320,134 @@ describe('moduleMocker', () => { ); }); + it('supports clearing a spy', () => { + let methodOneCalls = 0; + const obj = { + methodOne() { + methodOneCalls++; + }, + }; + + const spy1 = moduleMocker.spyOn(obj, 'methodOne'); + + obj.methodOne(); + + // The spy and the original function are called. + expect(methodOneCalls).toBe(1); + expect(spy1.mock.calls).toHaveLength(1); + + expect(moduleMocker.isMockFunction(obj.methodOne)).toBe(true); + + spy1.mockClear(); + + // After clearing the spy, the method is still mock function. + expect(moduleMocker.isMockFunction(obj.methodOne)).toBe(true); + + // After clearing the spy, call count is reset. 
+ expect(spy1.mock.calls).toHaveLength(0); + }); + + it('supports clearing all spies', () => { + let methodOneCalls = 0; + let methodTwoCalls = 0; + const obj = { + methodOne() { + methodOneCalls++; + }, + methodTwo() { + methodTwoCalls++; + }, + }; + + const spy1 = moduleMocker.spyOn(obj, 'methodOne'); + const spy2 = moduleMocker.spyOn(obj, 'methodTwo'); + + obj.methodOne(); + obj.methodTwo(); + + // Both spies and both original functions are called. + expect(methodOneCalls).toBe(1); + expect(methodTwoCalls).toBe(1); + expect(spy1.mock.calls).toHaveLength(1); + expect(spy2.mock.calls).toHaveLength(1); + + expect(moduleMocker.isMockFunction(obj.methodOne)).toBe(true); + expect(moduleMocker.isMockFunction(obj.methodTwo)).toBe(true); + + moduleMocker.clearAllMocks(); + + // After clearing all mocks, the methods are still mock functions. + expect(moduleMocker.isMockFunction(obj.methodOne)).toBe(true); + expect(moduleMocker.isMockFunction(obj.methodTwo)).toBe(true); + + // After clearing all mocks, call counts are reset. + expect(spy1.mock.calls).toHaveLength(0); + expect(spy2.mock.calls).toHaveLength(0); + }); + + it('supports resetting a spy', () => { + const methodOneReturn = 0; + const obj = { + methodOne() { + return methodOneReturn; + }, + }; + + const spy1 = moduleMocker.spyOn(obj, 'methodOne').mockReturnValue(10); + + // Return value is mocked. + expect(methodOneReturn).toBe(0); + expect(obj.methodOne()).toBe(10); + + expect(moduleMocker.isMockFunction(obj.methodOne)).toBe(true); + + spy1.mockReset(); + + // After resetting the spy, the method is still mock functions. + expect(moduleMocker.isMockFunction(obj.methodOne)).toBe(true); + + // After resetting the spy, the method returns the original return value. 
+ expect(methodOneReturn).toBe(0); + expect(obj.methodOne()).toBe(0); + }); + + it('supports resetting all spies', () => { + const methodOneReturn = 10; + const methodTwoReturn = 20; + const obj = { + methodOne() { + return methodOneReturn; + }, + methodTwo() { + return methodTwoReturn; + }, + }; + + moduleMocker.spyOn(obj, 'methodOne').mockReturnValue(100); + moduleMocker.spyOn(obj, 'methodTwo').mockReturnValue(200); + + // Return values are mocked. + expect(methodOneReturn).toBe(10); + expect(methodTwoReturn).toBe(20); + expect(obj.methodOne()).toBe(100); + expect(obj.methodTwo()).toBe(200); + + expect(moduleMocker.isMockFunction(obj.methodOne)).toBe(true); + expect(moduleMocker.isMockFunction(obj.methodTwo)).toBe(true); + + moduleMocker.resetAllMocks(); + + // After resetting all mocks, the methods are still mock functions. + expect(moduleMocker.isMockFunction(obj.methodOne)).toBe(true); + expect(moduleMocker.isMockFunction(obj.methodTwo)).toBe(true); + + // After resetting all mocks, the methods return the original return value. + expect(methodOneReturn).toBe(10); + expect(methodTwoReturn).toBe(20); + expect(obj.methodOne()).toBe(10); + expect(obj.methodTwo()).toBe(20); + }); + it('supports restoring a spy', () => { let methodOneCalls = 0; const obj = { @@ -1551,6 +1655,59 @@ describe('moduleMocker', () => { ); }); + it('supports resetting a spy', () => { + const methodOneReturn = 0; + const obj = { + get methodOne() { + return methodOneReturn; + }, + }; + + const spy1 = moduleMocker + .spyOn(obj, 'methodOne', 'get') + .mockReturnValue(10); + + // Return value is mocked. + expect(methodOneReturn).toBe(0); + expect(obj.methodOne).toBe(10); + + spy1.mockReset(); + + // After resetting the spy, the method returns the original return value. 
+ expect(methodOneReturn).toBe(0); + expect(obj.methodOne).toBe(0); + }); + + it('supports resetting all spies', () => { + const methodOneReturn = 10; + const methodTwoReturn = 20; + const obj = { + get methodOne() { + return methodOneReturn; + }, + get methodTwo() { + return methodTwoReturn; + }, + }; + + moduleMocker.spyOn(obj, 'methodOne', 'get').mockReturnValue(100); + moduleMocker.spyOn(obj, 'methodTwo', 'get').mockReturnValue(200); + + // Return values are mocked. + expect(methodOneReturn).toBe(10); + expect(methodTwoReturn).toBe(20); + expect(obj.methodOne).toBe(100); + expect(obj.methodTwo).toBe(200); + + moduleMocker.resetAllMocks(); + + // After resetting all mocks, the methods return the original return value. + expect(methodOneReturn).toBe(10); + expect(methodTwoReturn).toBe(20); + expect(obj.methodOne).toBe(10); + expect(obj.methodTwo).toBe(20); + }); + it('supports restoring a spy', () => { let methodOneCalls = 0; const obj = { @@ -1683,6 +1840,61 @@ describe('moduleMocker', () => { expect(obj.property).toBe(true); }); + it('supports resetting a spy on the prototype chain', () => { + const methodOneReturn = 0; + const prototype = { + get methodOne() { + return methodOneReturn; + }, + }; + const obj = Object.create(prototype, {}); + + const spy1 = moduleMocker + .spyOn(obj, 'methodOne', 'get') + .mockReturnValue(10); + + // Return value is mocked. + expect(methodOneReturn).toBe(0); + expect(obj.methodOne).toBe(10); + + spy1.mockReset(); + + // After resetting the spy, the method returns the original return value. 
+ expect(methodOneReturn).toBe(0); + expect(obj.methodOne).toBe(0); + }); + + it('supports resetting all spies on the prototype chain', () => { + const methodOneReturn = 10; + const methodTwoReturn = 20; + const prototype = { + get methodOne() { + return methodOneReturn; + }, + get methodTwo() { + return methodTwoReturn; + }, + }; + const obj = Object.create(prototype, {}); + + moduleMocker.spyOn(obj, 'methodOne', 'get').mockReturnValue(100); + moduleMocker.spyOn(obj, 'methodTwo', 'get').mockReturnValue(200); + + // Return values are mocked. + expect(methodOneReturn).toBe(10); + expect(methodTwoReturn).toBe(20); + expect(obj.methodOne).toBe(100); + expect(obj.methodTwo).toBe(200); + + moduleMocker.resetAllMocks(); + + // After resetting all mocks, the methods return the original return value. + expect(methodOneReturn).toBe(10); + expect(methodTwoReturn).toBe(20); + expect(obj.methodOne).toBe(10); + expect(obj.methodTwo).toBe(20); + }); + it('supports restoring a spy on the prototype chain', () => { let methodOneCalls = 0; const prototype = {
[Bug]: resetAllMocks restoring mocks instead of resetting them ### Version 29.4.0 ### Steps to reproduce Simple test to reproduce: ``` test('it should reset properly', () => { const obj = { foo: () => 42, }; const fooSpy = jest.spyOn(obj, 'foo'); console.log(obj.foo()); expect(obj.foo).toHaveBeenCalled(); jest.resetAllMocks(); console.log(obj.foo()); expect(obj.foo).toHaveBeenCalled(); }); ``` ### Expected behavior The test should pass ### Actual behavior The test fails with `Matcher error: received value must be a mock or spy function`. ### Additional context It seems that `resetAllMocks` is acting like `mockRestore` instead of `mockReset`. If I replace `jest.resetAllMocks()` with `fooSpy.mockReset()`, the test passes as expected, whereas if I replace it with `fooSpy.mockRestore()` it fails with that error again. ### Environment ```shell System: OS: Linux 5.15 KDE neon 5.26 5.26 CPU: (16) x64 AMD Ryzen 7 PRO 6850U with Radeon Graphics Binaries: Node: 18.12.0 - ~/.nvm/versions/node/v18.12.0/bin/node npm: 8.19.2 - ~/.nvm/versions/node/v18.12.0/bin/npm npmPackages: jest: ^29.4.0 => 29.4.0 ```
Are you working on this? No, I don't know enough about Jest internals to suggest a fix. ~Besides, I'd expect that a certain multi-billion-dollar corporation would be maintaining this. Don't get me wrong, I'm all for contributing to open source but if the project is backed by a huge corporation, I think they can spare the time to fix it.~ (see https://jestjs.io/blog/2022/05/11/jest-joins-openjs, thanks @mrazauskas for pointing it out) This line in this PR introduced the regression; https://github.com/facebook/jest/pull/13692/files#diff-7ae0bd704c3c2789b19abe2bbf94aca3505e2a6b3823d85c7f6b316b216d37c9R1411 Looking here (https://github.com/facebook/jest/pull/13692/files#diff-7ae0bd704c3c2789b19abe2bbf94aca3505e2a6b3823d85c7f6b316b216d37c9L1409), the same function is called to restore the mocks. While it's called reset in the lambda function of the new addition, it's still the same function being called. Unsure if just removing this call is the solution, or if something else needs to be done to still retain functionality of that PR I'm actually a bit confused, as it seems the point of that PR ([and the issue it fixes](https://github.com/facebook/jest/issues/13229)) is to make the behaviour of mockReset similar to mockRestore? AFAICT it's still a regression on resetAllMocks though, as that's actually removing the mock status of the function. But the original purpose of that PR is still a breaking change either way @feliperli Could you take a look, please? Seems like your PR (#13692) introduced this issue. Or is that expected behaviour? > Besides, I'd expect that a certain multi-billion-dollar corporation would be maintaining this. By the way, Jest is just another open source project maintained by few volunteers. See https://jestjs.io/blog/2022/05/11/jest-joins-openjs TIL, thanks for pointing that out. 
I did end up doing some digging, and while I'm not any closer to finding the root cause, I did notice [this](https://github.com/facebook/jest/blob/eca83e7221137785d585215f9ae9251adcdbacc3/packages/jest-mock/src/index.ts#L1410): ``` resetAllMocks(): void { this._spyState.forEach(reset => reset()); this._mockConfigRegistry = new WeakMap(); this._mockState = new WeakMap(); } restoreAllMocks(): void { this._spyState.forEach(restore => restore()); this._spyState = new Set(); } ``` It seems that both of those methods end up calling all the `_spyState` functions and on line 1401 just above, you see `this._spyState.add(restore);` so it seems that `resetAllMocks` is calling the restore function (but confusingly naming it `reset` in the `forEach`). I'm not sure if this is intentional, or if it's not, what a fix would look like, but hopefully this info at least helps. > I'm not sure if this is intentional, or if it's not, what a fix would look like, but hopefully this info at least helps. also seeing this impact our unit tests, with only a regenerated lockfile causing the breaking change > @feliperli Could you take a look, please? Seems like your PR (#13692) introduced this issue. Or is that expected behaviour? Oh no :( I'm looking into it. I also see this!!!! I have a different problem, but I think the cause is the same change in `mockReset`. 
Someone called `spyOn` on a mock function and it caused a maximum call stack error This is an example that reproduces the error: ``` js describe('test console.log', () => { console.log = jest.fn(); const log = jest.spyOn(console, 'log'); afterEach(() => { log.mockReset(); }); test('console.log a', () => { console.log('test a'); expect(log).toBeCalledWith('test a'); }); test('console.log b', () => { /** * An error is thrown on the next line: * RangeError: Maximum call stack size exceeded * at WeakMap.get (<anonymous>) * at ModuleMocker._ensureMockState (node_modules/jest-mock/build/index.js:275:33) */ console.log('test b'); expect(log).toBeCalledWith('test b'); }); }); ``` When I remove the line `this._mockConfigRegistry.set(f, originalMockImpl);` it works fine. Seems related: I'm also getting random RangeError: Maximum Call Stack Size Exceeded that are pointing to the `jest-mock` package, reverting pack to 29.3.1 fixes the issue.
"2023-02-07T09:15:40Z"
29.4
[]
[ "packages/jest-mock/src/__tests__/index.test.ts" ]
TypeScript
[]
[]
jestjs/jest
13,888
jestjs__jest-13888
[ "13887" ]
94b73a2dd8bf8f6225769f2fe4a9322ac85483d8
diff --git a/packages/jest-mock/src/index.ts b/packages/jest-mock/src/index.ts --- a/packages/jest-mock/src/index.ts +++ b/packages/jest-mock/src/index.ts @@ -842,16 +842,20 @@ export class ModuleMocker { // Remember previous mock implementation, then set new one const mockConfig = this._ensureMockConfig(f); const previousImplementation = mockConfig.mockImpl; + const previousSpecificImplementations = mockConfig.specificMockImpls; mockConfig.mockImpl = fn; + mockConfig.specificMockImpls = []; const returnedValue = callback(); if (isPromise(returnedValue)) { return returnedValue.then(() => { mockConfig.mockImpl = previousImplementation; + mockConfig.specificMockImpls = previousSpecificImplementations; }); } else { mockConfig.mockImpl = previousImplementation; + mockConfig.specificMockImpls = previousSpecificImplementations; } }
diff --git a/packages/jest-mock/src/__tests__/index.test.ts b/packages/jest-mock/src/__tests__/index.test.ts --- a/packages/jest-mock/src/__tests__/index.test.ts +++ b/packages/jest-mock/src/__tests__/index.test.ts @@ -1143,6 +1143,38 @@ describe('moduleMocker', () => { expect.assertions(4); }); + + it('mockImplementationOnce does not bleed into withImplementation', () => { + const mock = jest + .fn(() => 'outside callback') + .mockImplementationOnce(() => 'once'); + + mock.withImplementation( + () => 'inside callback', + () => { + expect(mock()).toBe('inside callback'); + }, + ); + + expect(mock()).toBe('once'); + expect(mock()).toBe('outside callback'); + }); + + it('mockReturnValueOnce does not bleed into withImplementation', () => { + const mock = jest + .fn(() => 'outside callback') + .mockReturnValueOnce('once'); + + mock.withImplementation( + () => 'inside callback', + () => { + expect(mock()).toBe('inside callback'); + }, + ); + + expect(mock()).toBe('once'); + expect(mock()).toBe('outside callback'); + }); }); test('mockReturnValue does not override mockImplementationOnce', () => {
[Bug]: mockImplementationOnce bleeds into withImplementation ### Version 29.4.2 ### Steps to reproduce 1. Open [online repo](https://stackblitz.com/edit/node-7pqpfa?file=mock.test.js) 2. Run `npm install` 3. Run `npm test` ### Expected behavior ```js const mock = jest .fn(() => 'outside callback') .mockImplementationOnce(() => 'once'); mock.withImplementation( () => 'inside callback', () => { expect(mock()).toBe('inside callback'); } ); expect(mock()).toBe('once'); expect(mock()).toBe('outside callback'); ``` ### Actual behavior ```js const mock = jest .fn(() => 'outside callback') .mockImplementationOnce(() => 'once'); mock.withImplementation( () => 'inside callback', () => { mock() // 'once' } ); mock() // 'outside callback' mock() // 'outside callback' ``` ### Additional context I think it is a little counterintuitive when unused `implementationOnce` bleeds into `withImplementation`. ### Environment ```shell System: OS: macOS 13.0.1 CPU: (8) arm64 Apple M1 Pro Binaries: Node: 18.12.1 - ~/.nvm/versions/node/v18.12.1/bin/node Yarn: 1.22.4 - ~/.yarn/bin/yarn npm: 8.19.2 - ~/.nvm/versions/node/v18.12.1/bin/npm npmPackages: jest: ^29.4.2 => 29.4.2 ```
"2023-02-10T08:09:01Z"
29.4
[]
[ "packages/jest-mock/src/__tests__/index.test.ts" ]
TypeScript
[]
[]
jestjs/jest
14,007
jestjs__jest-14007
[ "10167" ]
94c06ef0aa9b327f3c400610b861e7308b29ee0d
diff --git a/packages/jest-matcher-utils/src/deepCyclicCopyReplaceable.ts b/packages/jest-matcher-utils/src/deepCyclicCopyReplaceable.ts --- a/packages/jest-matcher-utils/src/deepCyclicCopyReplaceable.ts +++ b/packages/jest-matcher-utils/src/deepCyclicCopyReplaceable.ts @@ -57,9 +57,18 @@ export default function deepCyclicCopyReplaceable<T>( function deepCyclicCopyObject<T>(object: T, cycles: WeakMap<any, unknown>): T { const newObject = Object.create(Object.getPrototypeOf(object)); - const descriptors: { - [x: string]: PropertyDescriptor; - } = Object.getOwnPropertyDescriptors(object); + let descriptors: Record<string, PropertyDescriptor> = {}; + let obj = object; + do { + descriptors = Object.assign( + {}, + Object.getOwnPropertyDescriptors(obj), + descriptors, + ); + } while ( + (obj = Object.getPrototypeOf(obj)) && + obj !== Object.getPrototypeOf({}) + ); cycles.set(object, newObject);
diff --git a/packages/jest-matcher-utils/src/__tests__/deepCyclicCopyReplaceable.test.ts b/packages/jest-matcher-utils/src/__tests__/deepCyclicCopyReplaceable.test.ts --- a/packages/jest-matcher-utils/src/__tests__/deepCyclicCopyReplaceable.test.ts +++ b/packages/jest-matcher-utils/src/__tests__/deepCyclicCopyReplaceable.test.ts @@ -43,7 +43,7 @@ test('convert accessor descriptor into value descriptor', () => { }); }); -test('should not skips non-enumerables', () => { +test('should not skip non-enumerables', () => { const obj = {}; Object.defineProperty(obj, 'foo', {enumerable: false, value: 'bar'}); @@ -66,6 +66,18 @@ test('copies symbols', () => { expect(deepCyclicCopyReplaceable(obj)[symbol]).toBe(42); }); +test('copies value of inherited getters', () => { + class Foo { + #foo = 42; + get foo() { + return this.#foo; + } + } + const obj = new Foo(); + + expect(deepCyclicCopyReplaceable(obj).foo).toBe(42); +}); + test('copies arrays as array objects', () => { const array = [null, 42, 'foo', 'bar', [], {}];
toEqual throws a TypeError when using getter to private field of class ## πŸ› Bug Report Jest throws a `TypeError` when using `toEqual` or `toMatchObject` in a class with private fields (those starting by `#`) and a getter that exposes the value. I guess other similar matchers like `toStrictEqual` also fails but didn't test. I tested this using jest-circus and fails the same way. The class under test has a private field `#foo` and a getter `get foo()` that returns the value of `#foo`. Using `toMatchObject` with the name of the getter (something like `.toMatchObject({ foo: 0 })`) works when the value is equal but throws a `TypeError` when are distinct (I bet when trying to show the differences, but I haven't debugged). ## To Reproduce ```js class X { #foo = 42; get foo() { return this.#foo; } } test("it works and test pass", () => { const x = new X(); expect(x.foo).toEqual(42); }); test("it also works and test pass", () => { const x = new X(); expect(x).toMatchObject({ foo: 42 }); }); test("it works, the test fail since values are distinct", () => { const x = new X(); expect(x.foo).toEqual(0); // Expected: 0, Received: 42 }); test("it doesn't work, matcher throws a TypeError", () => { const x = new X(); expect(x).toMatchObject({ foo: 0 }); // TypeError: Cannot read private member #foo from an object whose class did not declare it }); test("also doesn't work, matcher throws a TypeError", () => { const x = new X(); expect(x).toEqual({ foo: 0 }); // TypeError: Cannot read private member #foo from an object whose class did not declare it }); ``` ## Expected behavior Last two test fail properly since property `foo` are distinct between expected and received objects, showing the right report. ## envinfo ```plain System: OS: Linux 5.6 Arch Linux CPU: (8) x64 Intel(R) Core(TM) i7-6700 CPU @ 3.40GHz Binaries: Node: 12.17.0 - /usr/bin/node npm: 6.14.5 - /usr/bin/npm npmPackages: jest: ^26.0.1 => 26.0.1 ```
@InExtremaRes did you ever find a fix or workaround for this? I've too run into this where I am trying to check the equality of a class's getters with a jest expect. @defunctzombie No, not really. I started creating a plain object with the properties I want to compare, but that's far from ideal. I made a little utility `pick` to take some properties from a given object: ```js const x = new X(); // suppose this have props `foo` and `bar` expect(pick(x, ['foo', 'bar'])).toEqual({ foo: 1, bar: 2 }); ``` I hope to see some progress on this, the way I'm doing it is very tedious. I've got the same problem. Is there any update? I have the same problem as @defunctzombie, but using `toMatchObject` instead of `toEqual` seems to make the tests pass as a workaround. It would be nice to get this fixed though as the error is really misleading and requires knowledge of the underlying class to know to use `toMatchObject` vs `toEqual`. Having this issue also on `node: "16.17.1", jest: "29.1.2"`. This issue is still reproducible with jest `29.2.2`. See https://stackblitz.com/edit/node-p9xadj?file=index.spec.js Jest is able to determine properly, that the expected value does not match the received value, so it starts to build the corresponding console output for the difference. For this [`printDiffOrStringify`](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/index.ts#L318) is called, which in turn calls [`replaceMatchedToAsymmetricMatcher`](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/index.ts#L410), which calls [`deepCyclicCopyReplaceable`](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/deepCyclicCopyReplaceable.ts#L37) which ends up at [`deepCyclicCopyObject`](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/deepCyclicCopyReplaceable.ts#L58). 
Now this method enumerates the properties of the received value, which is an instance of `X` containing `#foo` and inheriting `get foo()`. However the inherited `get foo()` is not enough for this method to identify `foo` as a property, i.e. [`newDescriptors`](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/deepCyclicCopyReplaceable.ts#L66) ends up as an empty object. If the getter for `foo` was identified, the value of `foo` would have been copied into the new object, replacing the getter with an actual value, however this does not happen, so [`_replaceMatchedToAsymmetricMatcher`](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/index.ts#L424) receives an object as the `replacedReceived` parameter with the `foo` property missing. Naturally when later this method tries to print out the value of `foo` on `replacedReceived` using the [`receivedReplaceable.get`](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/index.ts#L448) method instead of printing the value as would happened if the value of `foo` was copied, it [ends up accessing `foo` on an object with no such property](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/Replaceable.ts#L59) which in turn calls the getter defined on the parent (prototype of class `X`), however since the private property `#foo` was not copied either to `replacedReceived` from the original one, the getter now fails with `Cannot read private member #foo from an object whose class did not declare it`. So the problem here has two parts: `deepCyclicCopyObject` is: 1. not replacing the getter defined on the prototype with the value, 2. nor able to copy the private fields over. Therefore the getter will be called when the rest of the code expects it to be already replaced with a value, and the getter in turn tries to access the private field, which is also missing by this time (working with the partially copied object). 
Now obviously copying the private fields is not achievable, so 2.) is not fixable, so we can only expect 1.) to be fixed. The properties copied over are determined by [`Object.getOwnPropertyDescriptors`](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/deepCyclicCopyReplaceable.ts#L60) which includes both enumerable and non-enumerable properties, where properties are in the wider sense, so they include getters, setters etc. However only own, nothing inherited. And since `class X` defines the getter for `foo` on the class, from the perspective of `x` it is an inherited getter, therefore it is skipped by `Object.getOwnPropertyDescriptors`. As far as i know there is no built in method that could get the inherited descriptors together with own, so the naive solution is to iterate the prototype chain and collect the descriptors for each level (excluding the root object model). According to that a naive fix can be to replace ```ts const descriptors: { [x: string]: PropertyDescriptor; } = Object.getOwnPropertyDescriptors(object); ``` in [deepCyclicCopyReplaceable.ts](https://github.com/facebook/jest/blob/main/packages/jest-matcher-utils/src/deepCyclicCopyReplaceable.ts#L60-L62) with: ```ts let descriptors: Record<string, PropertyDescriptor> = {}; let obj = object; do { descriptors = Object.assign({}, Object.getOwnPropertyDescriptors(obj), descriptors) } while ( (obj = Object.getPrototypeOf( obj )) && obj !== Object.getPrototypeOf({}) ); ``` With this in place all the test cases from the top of the issue pass or fail gracefully with proper comparison output in the console. Let me know if PR is welcome! > Let me know if PR is welcome! Go for it!!! :-D
"2023-03-14T16:06:02Z"
29.5
[]
[ "packages/jest-matcher-utils/src/__tests__/deepCyclicCopyReplaceable.test.ts" ]
TypeScript
[]
[]
jestjs/jest
14,048
jestjs__jest-14048
[ "13980" ]
257f250a977264656eca386629c7ce3b6481f21b
diff --git a/packages/jest-transform/src/shouldInstrument.ts b/packages/jest-transform/src/shouldInstrument.ts --- a/packages/jest-transform/src/shouldInstrument.ts +++ b/packages/jest-transform/src/shouldInstrument.ts @@ -114,5 +114,9 @@ export default function shouldInstrument( } } + if (filename.endsWith('.json')) { + return false; + } + return true; }
diff --git a/packages/jest-transform/src/__tests__/shouldInstrument.test.ts b/packages/jest-transform/src/__tests__/shouldInstrument.test.ts --- a/packages/jest-transform/src/__tests__/shouldInstrument.test.ts +++ b/packages/jest-transform/src/__tests__/shouldInstrument.test.ts @@ -119,6 +119,12 @@ describe('shouldInstrument', () => { ['dont/collect/coverage.js'], ); }); + + it('when file is a .json module, but matches forceCoverageMatch', () => { + testShouldInstrument('do/collect/coverage.json', defaultOptions, { + forceCoverageMatch: ['**/do/**/*.json'], + }); + }); }); describe('should return false', () => { @@ -245,5 +251,13 @@ describe('shouldInstrument', () => { ['do/collect/coverage.js'], ); }); + + it('when file is a .json module', () => { + testShouldInstrument( + 'dont/collect/coverage.json', + defaultOptions, + defaultConfig, + ); + }); }); });
[Bug]: import JSON fails when coverage is enabled ### Version 29.4.3 ### Steps to reproduce data.json ```json {"hello":"world"} ``` jest.config.json ```json { "testEnvironment": "node", "transform": {} } ``` package.json ```json { "name": "jest_json_coverage", "scripts": { "test": "node --experimental-json-modules --experimental-vm-modules node_modules/jest/bin/jest.js" }, "type": "module", "devDependencies": { "jest": "29.4.3" } } ``` example.test.js ```js import myJSON from './data.json' assert {type:"json"}; test("t00",()=>{console.log(myJSON);}) ``` **Commands:** ```sh $ npm t # works ok $ npm t -- --coverage # fails ``` ### Expected behavior Test passes without runtime error. ### Actual behavior output: ```sh ● Test suite failed to run Jest encountered an unexpected token Jest failed to parse a file. This happens e.g. when your code or its dependencies use non-standard JavaScript syntax, or when Jest is not configured to support such syntax. Out of the box Jest supports Babel, which will be used to transform your files into valid JS based on your Babel configuration. By default "node_modules" folder is ignored by transformers. Here's what you can do: β€’ If you are trying to use ECMAScript Modules, see https://jestjs.io/docs/ecmascript-modules for how to enable it. β€’ If you are trying to use TypeScript, see https://jestjs.io/docs/getting-started#using-typescript β€’ To have some of your "node_modules" files transformed, you can specify a custom "transformIgnorePatterns" in your config. β€’ If you need a custom transformation specify a "transform" option in your config. β€’ If you simply want to mock your non-JS modules (e.g. binary assets) you can stub them out with the "moduleNameMapper" config option. 
You'll find more details and examples of these config options in the docs: https://jestjs.io/docs/configuration For information about custom transformations, see: https://jestjs.io/docs/code-transformation Details: SyntaxError: /home/armfazh/tmp/jest_json_coverage/data.json: Missing semicolon. (1:8) > 1 | {"hello":"world"} | ^ 2 | at instantiate (node_modules/@babel/parser/src/parse-error/credentials.ts:62:21) at instantiate (node_modules/@babel/parser/src/parse-error.ts:60:12) at Parser.toParseError [as raise] (node_modules/@babel/parser/src/tokenizer/index.ts:1464:19) at Parser.raise [as semicolon] (node_modules/@babel/parser/src/parser/util.ts:138:10) at Parser.semicolon [as parseExpressionStatement] (node_modules/@babel/parser/src/parser/statement.ts:1282:10) at Parser.parseExpressionStatement [as parseStatementContent] (node_modules/@babel/parser/src/parser/statement.ts:643:19) at Parser.parseStatementContent [as parseStatementLike] (node_modules/@babel/parser/src/parser/statement.ts:416:17) at Parser.parseStatementLike [as parseStatementListItem] (node_modules/@babel/parser/src/parser/statement.ts:365:17) at Parser.parseStatementListItem [as parseBlockOrModuleBlockBody] (node_modules/@babel/parser/src/parser/statement.ts:1363:16) at Parser.parseBlockOrModuleBlockBody [as parseBlockBody] (node_modules/@babel/parser/src/parser/statement.ts:1336:10) at Parser.parseBlockBody [as parseBlock] (node_modules/@babel/parser/src/parser/statement.ts:1304:10) at Parser.parseBlock [as parseStatementContent] (node_modules/@babel/parser/src/parser/statement.ts:547:21) at Parser.parseStatementContent [as parseStatementLike] (node_modules/@babel/parser/src/parser/statement.ts:416:17) at Parser.parseStatementLike [as parseModuleItem] (node_modules/@babel/parser/src/parser/statement.ts:353:17) at Parser.parseModuleItem [as parseBlockOrModuleBlockBody] (node_modules/@babel/parser/src/parser/statement.ts:1362:16) at Parser.parseBlockOrModuleBlockBody [as parseBlockBody] 
(node_modules/@babel/parser/src/parser/statement.ts:1336:10) at Parser.parseBlockBody [as parseProgram] (node_modules/@babel/parser/src/parser/statement.ts:226:10) at Parser.parseProgram [as parseTopLevel] (node_modules/@babel/parser/src/parser/statement.ts:208:25) at Parser.parseTopLevel [as parse] (node_modules/@babel/parser/src/parser/index.ts:45:10) at parse (node_modules/@babel/parser/src/index.ts:67:38) at parser (node_modules/@babel/core/src/parser/index.ts:28:19) at parser.next (<anonymous>) at normalizeFile (node_modules/@babel/core/src/transformation/normalize-file.ts:51:24) at normalizeFile.next (<anonymous>) at run (node_modules/@babel/core/src/transformation/index.ts:38:36) at run.next (<anonymous>) at transform (node_modules/@babel/core/src/transform.ts:29:20) at transform.next (<anonymous>) at evaluateSync (node_modules/gensync/index.js:251:28) at sync (node_modules/gensync/index.js:89:14) at fn (node_modules/@babel/core/src/errors/rewrite-stack-trace.ts:97:14) at transformSync (node_modules/@babel/core/src/transform.ts:66:52) at ScriptTransformer._instrumentFile (node_modules/@jest/transform/build/ScriptTransformer.js:389:46) at ScriptTransformer._buildTransformResult (node_modules/@jest/transform/build/ScriptTransformer.js:491:33) at ScriptTransformer.transformSourceAsync (node_modules/@jest/transform/build/ScriptTransformer.js:605:17) at ScriptTransformer._transformAndBuildScriptAsync (node_modules/@jest/transform/build/ScriptTransformer.js:636:35) at ScriptTransformer.transformAsync (node_modules/@jest/transform/build/ScriptTransformer.js:700:14) at async Promise.all (index 0) ``` ### Additional context _No response_ ### Environment ```shell System: OS: Linux 5.4 Ubuntu 20.04.5 LTS (Focal Fossa) CPU: (8) x64 Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz Binaries: Node: 18.0.0 - ~/.nvm/versions/node/v18.0.0/bin/node npm: 8.7.0 - ~/.nvm/versions/node/v18.0.0/bin/npm npmPackages: jest: 29.4.3 => 29.4.3 ```
This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 30 days.
"2023-04-03T10:27:41Z"
29.5
[]
[ "packages/jest-transform/src/__tests__/shouldInstrument.test.ts" ]
TypeScript
[]
[]
jestjs/jest
14,054
jestjs__jest-14054
[ "8293" ]
257f250a977264656eca386629c7ce3b6481f21b
diff --git a/packages/jest-config/src/readConfigFileAndSetRootDir.ts b/packages/jest-config/src/readConfigFileAndSetRootDir.ts --- a/packages/jest-config/src/readConfigFileAndSetRootDir.ts +++ b/packages/jest-config/src/readConfigFileAndSetRootDir.ts @@ -64,14 +64,17 @@ export default async function readConfigFileAndSetRootDir( // We don't touch it if it has an absolute path specified if (!path.isAbsolute(configObject.rootDir)) { // otherwise, we'll resolve it relative to the file's __dirname - configObject.rootDir = path.resolve( - path.dirname(configPath), - configObject.rootDir, - ); + configObject = { + ...configObject, + rootDir: path.resolve(path.dirname(configPath), configObject.rootDir), + }; } } else { // If rootDir is not there, we'll set it to this file's __dirname - configObject.rootDir = path.dirname(configPath); + configObject = { + ...configObject, + rootDir: path.dirname(configPath), + }; } return configObject;
diff --git a/packages/jest-config/src/__tests__/readConfigFileAndSetRootDir.test.ts b/packages/jest-config/src/__tests__/readConfigFileAndSetRootDir.test.ts new file mode 100644 --- /dev/null +++ b/packages/jest-config/src/__tests__/readConfigFileAndSetRootDir.test.ts @@ -0,0 +1,169 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +import path = require('path'); +import * as fs from 'graceful-fs'; +import {requireOrImportModule} from 'jest-util'; +import readConfigFileAndSetRootDir from '../readConfigFileAndSetRootDir'; + +jest.mock('graceful-fs').mock('jest-util'); + +describe('readConfigFileAndSetRootDir', () => { + describe('JavaScript file', () => { + test('reads config and sets `rootDir`', async () => { + jest.mocked(requireOrImportModule).mockResolvedValueOnce({notify: true}); + + const rootDir = path.resolve('some', 'path', 'to'); + const config = await readConfigFileAndSetRootDir( + path.join(rootDir, 'jest.config.js'), + ); + + expect(config).toEqual({notify: true, rootDir}); + }); + + test('handles exported function', async () => { + jest + .mocked(requireOrImportModule) + .mockResolvedValueOnce(() => ({bail: 1})); + + const rootDir = path.resolve('some', 'path', 'to'); + const config = await readConfigFileAndSetRootDir( + path.join(rootDir, 'jest.config.js'), + ); + + expect(config).toEqual({bail: 1, rootDir}); + }); + + test('handles exported async function', async () => { + jest + .mocked(requireOrImportModule) + .mockResolvedValueOnce(async () => ({testTimeout: 10000})); + + const rootDir = path.resolve('some', 'path', 'to'); + const config = await readConfigFileAndSetRootDir( + path.join(rootDir, 'jest.config.js'), + ); + + expect(config).toEqual({rootDir, testTimeout: 10000}); + }); + }); + + describe('JSON file', () => { + test('reads config and sets `rootDir`', async () => { + 
jest.mocked(fs.readFileSync).mockReturnValueOnce('{ "verbose": true }'); + + const rootDir = path.resolve('some', 'path', 'to'); + const config = await readConfigFileAndSetRootDir( + path.join(rootDir, 'jest.config.json'), + ); + + expect(config).toEqual({rootDir, verbose: true}); + }); + + test('supports comments in JSON', async () => { + jest + .mocked(fs.readFileSync) + .mockReturnValueOnce('{ // test comment\n "bail": true }'); + + const rootDir = path.resolve('some', 'path', 'to'); + const config = await readConfigFileAndSetRootDir( + path.join(rootDir, 'jest.config.json'), + ); + + expect(config).toEqual({bail: true, rootDir}); + }); + }); + + describe('package.json file', () => { + test('reads config from "jest" key and sets `rootDir`', async () => { + jest + .mocked(fs.readFileSync) + .mockReturnValueOnce('{ "jest": { "coverage": true } }'); + + const rootDir = path.resolve('some', 'path', 'to'); + const config = await readConfigFileAndSetRootDir( + path.join(rootDir, 'package.json'), + ); + + expect(config).toEqual({coverage: true, rootDir}); + }); + + test('sets rootDir if "jest" is absent', async () => { + jest.mocked(fs.readFileSync).mockReturnValueOnce('{ "name": "test" }'); + + const rootDir = path.resolve('some', 'path', 'to'); + const config = await readConfigFileAndSetRootDir( + path.join(rootDir, 'package.json'), + ); + + expect(config).toEqual({rootDir}); + }); + }); + + describe('sets `rootDir`', () => { + test('handles frozen config object', async () => { + jest + .mocked(requireOrImportModule) + .mockResolvedValueOnce(Object.freeze({preset: 'some-preset'})); + + const rootDir = path.resolve('some', 'path', 'to'); + const config = await readConfigFileAndSetRootDir( + path.join(rootDir, 'jest.config.js'), + ); + + expect(config).toEqual({preset: 'some-preset', rootDir}); + }); + + test('keeps the path if it is absolute', async () => { + const rootDir = path.resolve('some', 'path', 'to'); + 
jest.mocked(requireOrImportModule).mockResolvedValueOnce({ + rootDir, + testEnvironment: 'node', + }); + + const config = await readConfigFileAndSetRootDir( + path.join(path.resolve('other', 'path', 'to'), 'jest.config.js'), + ); + + expect(config).toEqual({rootDir, testEnvironment: 'node'}); + }); + + test('resolves the path relative to dirname of the config file', async () => { + jest.mocked(requireOrImportModule).mockResolvedValueOnce({ + restoreMocks: true, + rootDir: path.join('path', 'to'), + }); + + const config = await readConfigFileAndSetRootDir( + path.join(path.resolve('some'), 'jest.config.js'), + ); + + expect(config).toEqual({ + restoreMocks: true, + rootDir: path.resolve('some', 'path', 'to'), + }); + }); + + test('resolves relative path when the read config object if frozen', async () => { + jest.mocked(requireOrImportModule).mockResolvedValueOnce( + Object.freeze({ + resetModules: true, + rootDir: path.join('path', 'to'), + }), + ); + + const config = await readConfigFileAndSetRootDir( + path.join(path.resolve('some'), 'jest.config.js'), + ); + + expect(config).toEqual({ + resetModules: true, + rootDir: path.resolve('some', 'path', 'to'), + }); + }); + }); +});
Require of a shared config file returns unwanted rootDir field <!-- Love Jest? Please consider supporting our collective: πŸ‘‰ https://opencollective.com/jest/donate --> ## To Reproduce Create the following monorepo structure with configs that reference each other: _/jest.config.js_ ```js module.exports = { projects: [ 'packages/*' ] } ``` _/packages/build-scripts/jest.config.js_ ```js module.exports = { testMatch: [ '<rootDir>/**/test/*.js' ] } ``` _/packages/foo/jest.config.js_ ```js const commonConfig = require( '../build-scripts/jest.config.js' ); module.exports = { ...commonConfig, testEnvironment: 'jsdom', } ``` ## Expected behavior Each of the configs, when `require`d, will have `rootDir` set to the directory where the `jest.config.js` is. Because the configs don't specify them. ## Actual behavior The first require of `packages/build-scripts/jest.config.js` (when Jest reads the config) will mutate the module object and will set `rootDir` to `/abs/path/to/packages/build-scripts`. The second require (from the `packages/foo/jest.config.js` file) will return the module object with `rootDir` already set to `/abs/path/to/packages/build-scripts` and Jest won't set it to `/abs/path/to/packages/foo` as expected. Can be worked around by carefully setting `rootDir: '.'` on every config that extends the common one.
Another issue that has the same root cause: export a frozen object from the `jest.config.js`: ```js module.exports = Object.freeze( { ... } ); ``` Then Jest will fail to read it: ``` TypeError: Cannot assign to read only property 'rootDir' of object '#<Object>' at _default (.../node_modules/jest-config/build/readConfigFileAndSetRootDir.js:75:28) at readConfig (.../node_modules/jest-config/build/index.js:160:59) ``` Here's a reproduction https://github.com/also/jest-project-shared-config-bug. The way I experienced this issue was sharing a single base config in each project. This meant that all projects got the `rootDir` of the first and that project ran multiple times while the others never ran. Oh wow, what a stupid bug πŸ˜… Thanks for the great reproduction @also! Would you be able to send a PR? This diff seems to fix it: ```diff diff --git i/packages/jest-config/src/readConfigFileAndSetRootDir.ts w/packages/jest-config/src/readConfigFileAndSetRootDir.ts index e4361c8c1f..b30e37fca0 100644 --- i/packages/jest-config/src/readConfigFileAndSetRootDir.ts +++ w/packages/jest-config/src/readConfigFileAndSetRootDir.ts @@ -64,14 +64,17 @@ export default async function readConfigFileAndSetRootDir( // We don't touch it if it has an absolute path specified if (!path.isAbsolute(configObject.rootDir)) { // otherwise, we'll resolve it relative to the file's __dirname - configObject.rootDir = path.resolve( - path.dirname(configPath), - configObject.rootDir, - ); + configObject = { + ...configObject, + rootDir: path.resolve(path.dirname(configPath), configObject.rootDir), + }; } } else { // If rootDir is not there, we'll set it to this file's __dirname - configObject.rootDir = path.dirname(configPath); + configObject = { + ...configObject, + rootDir: path.dirname(configPath), + }; } return configObject; ``` This issue is stale because it has been open for 1 year with no activity. Remove stale label or comment or this will be closed in 30 days.
"2023-04-05T05:21:34Z"
29.5
[]
[ "packages/jest-config/src/__tests__/readConfigFileAndSetRootDir.test.ts" ]
TypeScript
[]
[]
jestjs/jest
14,087
jestjs__jest-14087
[ "14077", "14082" ]
77c2c6e00e0da58e242862237e541af4dde90d3c
diff --git a/packages/jest-mock/src/index.ts b/packages/jest-mock/src/index.ts --- a/packages/jest-mock/src/index.ts +++ b/packages/jest-mock/src/index.ts @@ -1156,19 +1156,16 @@ export class ModuleMocker { methodKey: keyof T, accessType?: 'get' | 'set', ): MockInstance { - if (typeof object !== 'object' && typeof object !== 'function') { + if ( + object == null || + (typeof object !== 'object' && typeof object !== 'function') + ) { throw new Error( `Cannot use spyOn on a primitive value; ${this._typeOf(object)} given`, ); } - if (!object) { - throw new Error( - `spyOn could not find an object to spy on for ${String(methodKey)}`, - ); - } - - if (!methodKey) { + if (methodKey == null) { throw new Error('No property name supplied'); } @@ -1178,12 +1175,20 @@ export class ModuleMocker { const original = object[methodKey]; + if (!original) { + throw new Error( + `Property \`${String( + methodKey, + )}\` does not exist in the provided object`, + ); + } + if (!this.isMockFunction(original)) { if (typeof original !== 'function') { throw new Error( - `Cannot spy on the ${String( + `Cannot spy on the \`${String( methodKey, - )} property because it is not a function; ${this._typeOf( + )}\` property because it is not a function; ${this._typeOf( original, )} given instead.${ typeof original !== 'object' @@ -1266,18 +1271,24 @@ export class ModuleMocker { } if (!descriptor) { - throw new Error(`${String(propertyKey)} property does not exist`); + throw new Error( + `Property \`${String( + propertyKey, + )}\` does not exist in the provided object`, + ); } if (!descriptor.configurable) { - throw new Error(`${String(propertyKey)} is not declared configurable`); + throw new Error( + `Property \`${String(propertyKey)}\` is not declared configurable`, + ); } if (!descriptor[accessType]) { throw new Error( - `Property ${String( + `Property \`${String( propertyKey, - )} does not have access type ${accessType}`, + )}\` does not have access type ${accessType}`, ); } @@ -1329,26 +1340,21 
@@ export class ModuleMocker { propertyKey: K, value: T[K], ): Replaced<T[K]> { - if (object === undefined || object == null) { + if ( + object == null || + (typeof object !== 'object' && typeof object !== 'function') + ) { throw new Error( - `replaceProperty could not find an object on which to replace ${String( - propertyKey, - )}`, + `Cannot use replaceProperty on a primitive value; ${this._typeOf( + object, + )} given`, ); } - if (propertyKey === undefined || propertyKey === null) { + if (propertyKey == null) { throw new Error('No property name supplied'); } - if (typeof object !== 'object') { - throw new Error( - `Cannot mock property on a non-object value; ${this._typeOf( - object, - )} given`, - ); - } - let descriptor = Object.getOwnPropertyDescriptor(object, propertyKey); let proto = Object.getPrototypeOf(object); while (!descriptor && proto !== null) { @@ -1356,17 +1362,23 @@ export class ModuleMocker { proto = Object.getPrototypeOf(proto); } if (!descriptor) { - throw new Error(`${String(propertyKey)} property does not exist`); + throw new Error( + `Property \`${String( + propertyKey, + )}\` does not exist in the provided object`, + ); } if (!descriptor.configurable) { - throw new Error(`${String(propertyKey)} is not declared configurable`); + throw new Error( + `Property \`${String(propertyKey)}\` is not declared configurable`, + ); } if (descriptor.get !== undefined) { throw new Error( - `Cannot mock the ${String( + `Cannot replace the \`${String( propertyKey, - )} property because it has a getter. Use \`jest.spyOn(object, '${String( + )}\` property because it has a getter. Use \`jest.spyOn(object, '${String( propertyKey, )}', 'get').mockReturnValue(value)\` instead.`, ); @@ -1374,9 +1386,9 @@ export class ModuleMocker { if (descriptor.set !== undefined) { throw new Error( - `Cannot mock the ${String( + `Cannot replace the \`${String( propertyKey, - )} property because it has a setter. 
Use \`jest.spyOn(object, '${String( + )}\` property because it has a setter. Use \`jest.spyOn(object, '${String( propertyKey, )}', 'set').mockReturnValue(value)\` instead.`, ); @@ -1384,9 +1396,9 @@ export class ModuleMocker { if (typeof descriptor.value === 'function') { throw new Error( - `Cannot mock the ${String( + `Cannot replace the \`${String( propertyKey, - )} property because it is a function. Use \`jest.spyOn(object, '${String( + )}\` property because it is a function. Use \`jest.spyOn(object, '${String( propertyKey, )}')\` instead.`, );
diff --git a/packages/jest-mock/src/__tests__/index.test.ts b/packages/jest-mock/src/__tests__/index.test.ts --- a/packages/jest-mock/src/__tests__/index.test.ts +++ b/packages/jest-mock/src/__tests__/index.test.ts @@ -1304,20 +1304,181 @@ describe('moduleMocker', () => { expect(spy).not.toHaveBeenCalled(); }); - it('should throw on invalid input', () => { - expect(() => { - moduleMocker.spyOn(null, 'method'); - }).toThrow('spyOn could not find an object to spy on for method'); - expect(() => { - moduleMocker.spyOn({}, 'method'); - }).toThrow( - "Cannot spy on the method property because it is not a function; undefined given instead. If you are trying to mock a property, use `jest.replaceProperty(object, 'method', value)` instead.", - ); - expect(() => { - moduleMocker.spyOn({method: 10}, 'method'); - }).toThrow( - "Cannot spy on the method property because it is not a function; number given instead. If you are trying to mock a property, use `jest.replaceProperty(object, 'method', value)` instead.", + describe('should throw', () => { + it.each` + value | type + ${'foo'} | ${'string'} + ${1} | ${'number'} + ${NaN} | ${'number'} + ${1n} | ${'bigint'} + ${Symbol()} | ${'symbol'} + ${true} | ${'boolean'} + ${false} | ${'boolean'} + ${undefined} | ${'undefined'} + ${null} | ${'null'} + `( + 'when primitive value $value is provided instead of an object', + ({value, type}) => { + expect(() => { + moduleMocker.spyOn(value, 'method'); + }).toThrow(`Cannot use spyOn on a primitive value; ${type} given`); + }, ); + + it('when property name is not provided', () => { + expect(() => { + moduleMocker.spyOn({}, null); + }).toThrow('No property name supplied'); + }); + + it('when property does not exist', () => { + expect(() => { + moduleMocker.spyOn({}, 'doesNotExist'); + }).toThrow( + 'Property `doesNotExist` does not exist in the provided object', + ); + }); + + it('when getter does not exist', () => { + expect(() => { + moduleMocker.spyOn({}, 'missingGet', 'get'); + }).toThrow( 
+ 'Property `missingGet` does not exist in the provided object', + ); + }); + + it('when setter does not exist', () => { + expect(() => { + moduleMocker.spyOn({}, 'missingSet', 'set'); + }).toThrow( + 'Property `missingSet` does not exist in the provided object', + ); + }); + + it('when getter is not configurable', () => { + expect(() => { + const obj = {}; + + Object.defineProperty(obj, 'property', { + configurable: false, + get() { + return 1; + }, + }); + + moduleMocker.spyOn(obj, 'property', 'get'); + }).toThrow('Property `property` is not declared configurable'); + }); + + it('when setter is not configurable', () => { + expect(() => { + const obj = {}; + let value = 38; + + Object.defineProperty(obj, 'property', { + configurable: false, + get() { + return value; + }, + set(newValue) { + value = newValue; + }, + }); + + moduleMocker.spyOn(obj, 'property', 'set'); + }).toThrow('Property `property` is not declared configurable'); + }); + + it('when property does not have access type get', () => { + expect(() => { + const obj = {}; + let value = 38; + + // eslint-disable-next-line accessor-pairs + Object.defineProperty(obj, 'property', { + configurable: true, + set(newValue) { + value = newValue; + }, + }); + + moduleMocker.spyOn(obj, 'property', 'get'); + }).toThrow('Property `property` does not have access type get'); + }); + + it('when property does not have access type set', () => { + expect(() => { + const obj = {}; + + Object.defineProperty(obj, 'property', { + configurable: true, + get() { + return 1; + }, + }); + + moduleMocker.spyOn(obj, 'property', 'set'); + }).toThrow('Property `property` does not have access type set'); + }); + + it('when trying to spy on a non function property', () => { + expect(() => { + moduleMocker.spyOn({property: 123}, 'property'); + }).toThrow( + "Cannot spy on the `property` property because it is not a function; number given instead. 
If you are trying to mock a property, use `jest.replaceProperty(object, 'property', value)` instead.", + ); + }); + }); + + it('supports spying on a method named `0`', () => { + let haveBeenCalled = false; + const obj = { + 0: () => { + haveBeenCalled = true; + }, + }; + + const spy = moduleMocker.spyOn(obj, 0); + obj[0].call(null); + + expect(haveBeenCalled).toBe(true); + expect(spy).toHaveBeenCalled(); + }); + + it('supports spying on a symbol-keyed method', () => { + const k = Symbol(); + + let haveBeenCalled = false; + const obj = { + [k]: () => { + haveBeenCalled = true; + }, + }; + + const spy = moduleMocker.spyOn(obj, k); + obj[k].call(null); + + expect(haveBeenCalled).toBe(true); + expect(spy).toHaveBeenCalled(); + }); + + it('supports spying on a method which is defined on a function', () => { + let haveBeenCalled = false; + const obj = () => true; + + Object.defineProperty(obj, 'method', { + configurable: true, + value: () => { + haveBeenCalled = true; + }, + writable: true, + }); + + const spy = moduleMocker.spyOn(obj, 'method'); + obj['method'].call(null); + + expect(haveBeenCalled).toBe(true); + expect(spy).toHaveBeenCalled(); }); it('supports clearing a spy', () => { @@ -1642,16 +1803,14 @@ describe('moduleMocker', () => { it('should throw on invalid input', () => { expect(() => { moduleMocker.spyOn(null, 'method'); - }).toThrow('spyOn could not find an object to spy on for method'); + }).toThrow('Cannot use spyOn on a primitive value; null given'); expect(() => { moduleMocker.spyOn({}, 'method'); - }).toThrow( - "Cannot spy on the method property because it is not a function; undefined given instead. If you are trying to mock a property, use `jest.replaceProperty(object, 'method', value)` instead.", - ); + }).toThrow('Property `method` does not exist in the provided object'); expect(() => { moduleMocker.spyOn({method: 10}, 'method'); }).toThrow( - "Cannot spy on the method property because it is not a function; number given instead. 
If you are trying to mock a property, use `jest.replaceProperty(object, 'method', value)` instead.", + "Cannot spy on the `method` property because it is not a function; number given instead. If you are trying to mock a property, use `jest.replaceProperty(object, 'method', value)` instead.", ); }); @@ -2018,34 +2177,23 @@ describe('moduleMocker', () => { describe('should throw', () => { it.each` - value - ${null} - ${undefined} - `('when $value is provided instead of an object', ({value}) => { - expect(() => { - moduleMocker.replaceProperty(value, 'property', 1); - }).toThrow( - 'replaceProperty could not find an object on which to replace property', - ); - }); - - it.each` - value | type - ${'foo'} | ${'string'} - ${1} | ${'number'} - ${NaN} | ${'number'} - ${1n} | ${'bigint'} - ${Symbol()} | ${'symbol'} - ${true} | ${'boolean'} - ${false} | ${'boolean'} - ${() => {}} | ${'function'} + value | type + ${'foo'} | ${'string'} + ${1} | ${'number'} + ${NaN} | ${'number'} + ${1n} | ${'bigint'} + ${Symbol()} | ${'symbol'} + ${true} | ${'boolean'} + ${false} | ${'boolean'} + ${undefined} | ${'undefined'} + ${null} | ${'null'} `( 'when primitive value $value is provided instead of an object', ({value, type}) => { expect(() => { moduleMocker.replaceProperty(value, 'property', 1); }).toThrow( - `Cannot mock property on a non-object value; ${type} given`, + `Cannot use replaceProperty on a primitive value; ${type} given`, ); }, ); @@ -2056,10 +2204,12 @@ describe('moduleMocker', () => { }).toThrow('No property name supplied'); }); - it('when property is not defined', () => { + it('when property does not exist', () => { expect(() => { moduleMocker.replaceProperty({}, 'doesNotExist', 1); - }).toThrow('doesNotExist property does not exist'); + }).toThrow( + 'Property `doesNotExist` does not exist in the provided object', + ); }); it('when property is not configurable', () => { @@ -2073,18 +2223,18 @@ describe('moduleMocker', () => { }); moduleMocker.replaceProperty(obj, 
'property', 2); - }).toThrow('property is not declared configurable'); + }).toThrow('Property `property` is not declared configurable'); }); - it('when trying to mock a method', () => { + it('when trying to replace a method', () => { expect(() => { moduleMocker.replaceProperty({method: () => {}}, 'method', () => {}); }).toThrow( - "Cannot mock the method property because it is a function. Use `jest.spyOn(object, 'method')` instead.", + "Cannot replace the `method` property because it is a function. Use `jest.spyOn(object, 'method')` instead.", ); }); - it('when mocking a getter', () => { + it('when trying to replace a getter', () => { const obj = { get getter() { return 1; @@ -2093,10 +2243,12 @@ describe('moduleMocker', () => { expect(() => { moduleMocker.replaceProperty(obj, 'getter', 1); - }).toThrow('Cannot mock the getter property because it has a getter'); + }).toThrow( + 'Cannot replace the `getter` property because it has a getter', + ); }); - it('when mocking a setter', () => { + it('when trying to replace a setter', () => { const obj = { // eslint-disable-next-line accessor-pairs set setter(_value: number) {}, @@ -2104,10 +2256,48 @@ describe('moduleMocker', () => { expect(() => { moduleMocker.replaceProperty(obj, 'setter', 1); - }).toThrow('Cannot mock the setter property because it has a setter'); + }).toThrow( + 'Cannot replace the `setter` property because it has a setter', + ); }); }); + it('supports replacing a property named `0`', () => { + const obj = { + 0: 'zero', + }; + + moduleMocker.replaceProperty(obj, 0, 'null'); + + expect(obj[0]).toBe('null'); + }); + + it('supports replacing a symbol-keyed property', () => { + const k = Symbol(); + + const obj = { + [k]: 'zero', + }; + + moduleMocker.replaceProperty(obj, k, 'null'); + + expect(obj[k]).toBe('null'); + }); + + it('supports replacing a property which is defined on a function', () => { + const obj = () => true; + + Object.defineProperty(obj, 'property', { + configurable: true, + value: 
'abc', + writable: true, + }); + + moduleMocker.replaceProperty(obj, 'property', 'def'); + + expect(obj['property']).toBe('def'); + }); + it('should work for property from prototype chain', () => { const parent = {property: 'abcd'}; const child = Object.create(parent);
[Bug]: jest-mock/spyOn property name check is "truthy" instead of "defined" or "existent" ### Version 29.5 ### Steps to reproduce Minimal Typescript example with real-world use case ``` enum WhyNot { A, B, C } const DynamicDispatchObject:Record<WhyNot,CallableFunction>={ [WhyNot.A]:()=>{}, [WhyNot.B]:()=>{}, [WhyNot.C]:()=>{} } // Throws with 'No property name supplied', cause WhyNot.A === 0 const spiedFunction = jest.spyOn(DynamicDispatchObject, WhyNot.A); // Typescript fails due to object 'keyof' type inference const spiedFunction = jest.spyOn(DynamicDispatchObject, String(WhyNot.A)); ``` Without Typescript it's still an issue, though the pattern is not that common in human-written code ``` const DynamicDispatchObject={ 0: console.log, 1: console.info } // throws with 'No property name supplied' jest.spyOn(DynamicDispatchObject, 0); // WORKS jest.spyOn(DynamicDispatchObject, "0"); ``` ### Expected behavior I expect any existing object property (that is a `Function`) should be available for mocking and spying 0 is perfectly valid object key, and can be used interchangeably with "0" due to javascript native type coercion, so I'd expect mocking out methods, referenced by those keys, should be possible. In typescript I'd expect it to work too, without any typecasting or `//@ts-ignore`, due to the nature of static type checking. That is fixed by using string Enums or starting them explicitly with 1, but that might not be the desired behaviour in some cases. 
Let alone I could as well use negative numbers as keys, which would also fail to work with `spyOn` ### Actual behavior Pretty expectedly and non-positive integer keys in objects can't be mocked due to the way the [property name check](https://github.com/facebook/jest/blob/main/packages/jest-mock/src/index.ts#L1171) is done: ``` if (!methodKey) { throw new Error('No property name supplied'); } ``` which is too broad of a restriction, and I'd further argue that the check like this is even redundant, 'cause few lines below there's a dereference with a sort of property existence check, which could be done in the first place ``` const original = object[methodKey]; if (!this.isMockFunction(original)) { if (typeof original !== 'function') { ``` ### Additional context _No response_ ### Environment ```shell System: OS: Windows 10 10.0.22621 CPU: (20) x64 12th Gen Intel(R) Core(TM) i7-12700H Binaries: Node: 18.12.1 - C:\Program Files\nodejs\node.EXE npm: 9.3.1 - C:\Program Files\nodejs\npm.CMD npmPackages: jest: ^29.3.1 => 29.5.0 ``` fix(jest-mock): spyOn should support `0` key in objects (#14077) ## Summary `jest.spyOn` previously throwed when applied to a method that is referenced by `0` key of the target module/object, thus being incompatible with design patterns, that rely on Enum indices, and Array-like objects. See [#14077](https://github.com/facebook/jest/issues/14077) ## Test plan ``` enum IndexedKeys { A, B, C } const DynamicDispatchObject:Record<IndexedKeys,CallableFunction>={ [IndexedKeys.A]:()=>{}, [IndexedKeys.B]:()=>{}, [IndexedKeys.C]:()=>{} } // Doesn't throw anymore, works as intended const spiedFunction = jest.spyOn(DynamicDispatchObject, IndexedKeys.A); ```
Just open a PR to fix this. I guess the check should be: ```ts if (methodKey == null) { throw new Error('No property name supplied'); } ``` > Just open a PR to fix this. I guess the check should be: > > ```ts > if (methodKey == null) { > throw new Error('No property name supplied'); > } > ``` that would actually not fix the issue but would rather fail the check when empty string is passed as method name of many options to exclude all numbers that are converted to string `"0"` (like `0`, `-0.0` , `0x0` and so on) from `falsy` check I've chosen to go with `Number.isFinite` which doesn't affect `NaN` and `BigInt` keys (I hope there's no good reason to use these as indices)
"2023-04-19T13:51:20Z"
29.5
[]
[ "packages/jest-mock/src/__tests__/index.test.ts" ]
TypeScript
[]
[]
jestjs/jest
14,465
jestjs__jest-14465
[ "14464" ]
f2c78d04ff35688571a66d66c8a2c7c17a095444
diff --git a/packages/jest-snapshot/src/InlineSnapshots.ts b/packages/jest-snapshot/src/InlineSnapshots.ts --- a/packages/jest-snapshot/src/InlineSnapshots.ts +++ b/packages/jest-snapshot/src/InlineSnapshots.ts @@ -274,7 +274,7 @@ const traverseAst = ( snapshotMatcherNames.push(callee.property.name); const snapshotIndex = args.findIndex( - ({type}) => type === 'TemplateLiteral', + ({type}) => type === 'TemplateLiteral' || type === 'StringLiteral', ); const {snapshot} = inlineSnapshot;
diff --git a/packages/jest-snapshot/src/__tests__/InlineSnapshots.test.ts b/packages/jest-snapshot/src/__tests__/InlineSnapshots.test.ts --- a/packages/jest-snapshot/src/__tests__/InlineSnapshots.test.ts +++ b/packages/jest-snapshot/src/__tests__/InlineSnapshots.test.ts @@ -738,3 +738,23 @@ test('saveInlineSnapshots() prioritize parser from project/editor configuration' '});\n', ); }); + +test('saveInlineSnapshots() replaces string literal, not just template literal', () => { + const filename = path.join(dir, 'my.test.js'); + fs.writeFileSync(filename, 'expect("a").toMatchInlineSnapshot("b");\n'); + + saveInlineSnapshots( + [ + { + frame: {column: 13, file: filename, line: 1} as Frame, + snapshot: 'a', + }, + ], + dir, + 'prettier', + ); + + expect(fs.readFileSync(filename, 'utf-8')).toBe( + 'expect("a").toMatchInlineSnapshot(`a`);\n', + ); +});
[Bug]: Inline snapshots are appending a new template literal instead of replacing existing string ### Version 29.6.4 ### Steps to reproduce 1. Clone https://github.com/bawjensen/jest-inline-snapshot-repro 2. `pnpm install` 3. `pnpm jest inline-snapshot.test.js --updateSnapshot` (though I had to run `pnpm jest inline-snapshot.test.js --updateSnapshot --prettierPath ''` perhaps due to quirks in my path having `prettier` on it) 4. Observe the diff in `inline-snapshot.test.js` ### Expected behavior I see ```diff test("a failing inline snapshot will append, not overwrite as desired", () => { - expect("expected string").toMatchInlineSnapshot('"incorrect string"'); + expect("expected string").toMatchInlineSnapshot(`"expected string"`); }); ``` ### Actual behavior I see ```diff test("a failing inline snapshot will append, not overwrite as desired", () => { - expect("expected string").toMatchInlineSnapshot('"incorrect string"'); + expect("expected string").toMatchInlineSnapshot('"incorrect string"', `"expected string"`); }); ``` ### Additional context Our single-line template literals are being rewritten with single quotes due to our usage of the `quotes` eslint rule, thus we run into this issue. I believe that I've traced the issue to https://github.com/jestjs/jest/blob/f2c78d04ff35688571a66d66c8a2c7c17a095444/packages/jest-snapshot/src/InlineSnapshots.ts#L277, and can confirm in my setup that patching this with ```diff const snapshotIndex = args.findIndex( - ({type}) => type === 'TemplateLiteral' + ({type}) => type === 'TemplateLiteral' || type === 'StringLiteral' ); ``` fixes the issue. I'll be pushing up a PR soon with that proposed fix. ### Environment ```shell System: OS: macOS 13.5.1 CPU: (10) x64 Apple M1 Max Binaries: Node: 16.17.1 - ~/src/coda/build/node/bin/node Yarn: 1.22.19 - /opt/homebrew/bin/yarn npm: 8.15.0 - ~/src/coda/build/node/bin/npm pnpm: 8.6.5 - ~/src/coda/build/node/bin/pnpm npmPackages: jest: ^29.6.4 => 29.6.4 ```
From what I can tell this is the same as #10164, which was closed due to inactivity but seemed like a valid issue
"2023-08-31T22:36:07Z"
29.6
[]
[ "packages/jest-snapshot/src/__tests__/InlineSnapshots.test.ts" ]
TypeScript
[]
[]
jestjs/jest
14,688
jestjs__jest-14688
[ "13936" ]
5b97c9dda5c970892a8b96bec4313cb1f60dee06
diff --git a/packages/expect-utils/src/utils.ts b/packages/expect-utils/src/utils.ts --- a/packages/expect-utils/src/utils.ts +++ b/packages/expect-utils/src/utils.ts @@ -307,8 +307,8 @@ export const iterableEquality = ( !isImmutableOrderedSet(a) && !isImmutableRecord(a) ) { - const aEntries = Object.entries(a); - const bEntries = Object.entries(b); + const aEntries = entries(a); + const bEntries = entries(b); if (!equals(aEntries, bEntries)) { return false; } @@ -320,6 +320,15 @@ export const iterableEquality = ( return true; }; +const entries = (obj: any) => { + if (!isObject(obj)) return []; + + return Object.getOwnPropertySymbols(obj) + .filter(key => key !== Symbol.iterator) + .map(key => [key, obj[key]]) + .concat(Object.entries(obj)); +}; + const isObject = (a: any) => a !== null && typeof a === 'object'; const isObjectWithKeys = (a: any) =>
diff --git a/packages/expect-utils/src/__tests__/utils.test.ts b/packages/expect-utils/src/__tests__/utils.test.ts --- a/packages/expect-utils/src/__tests__/utils.test.ts +++ b/packages/expect-utils/src/__tests__/utils.test.ts @@ -591,6 +591,36 @@ describe('iterableEquality', () => { const b = new TestRecord().set('dummy', 'data'); expect(iterableEquality(a, b)).toBe(true); }); + + test('returns true when given a symbols keys within equal objects', () => { + const KEY = Symbol(); + + const a = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + [KEY]: [], + }; + const b = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + [KEY]: [], + }; + + expect(iterableEquality(a, b)).toBe(true); + }); + + test('returns false when given a symbols keys within inequal objects', () => { + const KEY = Symbol(); + + const a = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + [KEY]: [1], + }; + const b = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + [KEY]: [], + }; + + expect(iterableEquality(a, b)).toBe(false); + }); }); describe('typeEquality', () => {
[Bug]: toEqual() doesn't work for iterable objects if objects have symbol property ### Version latest ### Steps to reproduce ```javascript const Key = Symbol() class Test { constructor(value) { this[Key] = value } [Symbol.iterator]() { return [][Symbol.iterator]() } } expect(new Test(1)).toEqual(new Test(2)) // success, because property [Key] is ignored ``` ### Expected behavior ```javascript expect(new Test(1)).toEqual(new Test(2)) // fail ``` ### Actual behavior succeed ### Additional context Object.entries() does not return symbols, so symbols are ignored https://github.com/facebook/jest/blob/1eb3bb5949f4ed1dbb39b9a9d9d76c3399ffd7b0/packages/expect-utils/src/utils.ts#L306-L310 ### Environment ```shell System: OS: macOS 12.5.1 CPU: (10) arm64 Apple M1 Pro Binaries: Node: 16.17.0 - ~/.volta/tools/image/node/16.17.0/bin/node Yarn: 1.22.19 - ~/.volta/tools/image/yarn/1.22.19/bin/yarn npm: 8.15.0 - ~/.volta/tools/image/node/16.17.0/bin/npm npmPackages: jest: latest => 29.4.3 ```
This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 30 days. This issue was closed because it has been stalled for 30 days with no activity. Please open a new issue if the issue is still relevant, linking to this one. This issue has been automatically locked since there has not been any recent activity after it was closed. Please open a new issue for related bugs. Please note this issue tracker is not a help forum. We recommend using [StackOverflow](https://stackoverflow.com/questions/tagged/jestjs) or our [discord channel](https://discord.gg/j6FKKQQrW9) for questions.
"2023-11-09T16:38:44Z"
30.0
[]
[ "packages/expect-utils/src/__tests__/utils.test.ts" ]
TypeScript
[]
[]
jestjs/jest
14,894
jestjs__jest-14894
[ "14734" ]
8bbe2a34a8a7979d71ece3fadc6006df3e607d9f
diff --git a/packages/expect-utils/src/utils.ts b/packages/expect-utils/src/utils.ts --- a/packages/expect-utils/src/utils.ts +++ b/packages/expect-utils/src/utils.ts @@ -355,12 +355,14 @@ export const subsetEquality = ( return undefined; } - return getObjectKeys(subset).every(key => { + if (seenReferences.has(subset)) return undefined; + seenReferences.set(subset, true); + + const matchResult = getObjectKeys(subset).every(key => { if (isObjectWithKeys(subset[key])) { if (seenReferences.has(subset[key])) { return equals(object[key], subset[key], filteredCustomTesters); } - seenReferences.set(subset[key], true); } const result = object != null && @@ -377,6 +379,8 @@ export const subsetEquality = ( seenReferences.delete(subset[key]); return result; }); + seenReferences.delete(subset); + return matchResult; }; return subsetEqualityWithContext()(object, subset);
diff --git a/e2e/__tests__/circularRefInBuiltInObj.test.ts b/e2e/__tests__/circularRefInBuiltInObj.test.ts new file mode 100644 --- /dev/null +++ b/e2e/__tests__/circularRefInBuiltInObj.test.ts @@ -0,0 +1,89 @@ +/** + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +describe('matches circular references nested in:', () => { + interface CircularObj { + ref: unknown; + [prop: string]: unknown; + } + + test('arrays', () => { + type CircularArray = CircularObj & {ref: Array<unknown>}; + + const a: CircularArray = {c: 1, ref: [1]}; + const b: CircularArray = {c: 1, ref: [1]}; + + a.ref.push(a); + b.ref.push(b); + expect(a).toMatchObject(b); + + b.ref = []; + expect(a).not.toMatchObject(b); + + b.ref = [1]; + expect(a).not.toMatchObject(b); + }); + + test('deeply nested array properties', () => { + type DeepCircularArray = CircularObj & {ref: {inner: Array<unknown>}}; + const a: DeepCircularArray = { + c: 1, + ref: { + inner: [1], + }, + }; + const b: DeepCircularArray = { + c: 1, + ref: { + inner: [1], + }, + }; + a.ref.inner.push(a); + b.ref.inner.push(b); + expect(a).toMatchObject(b); + + b.ref.inner = []; + expect(a).not.toMatchObject(b); + + b.ref.inner = [1]; + expect(a).not.toMatchObject(b); + }); + + test('sets', () => { + type CircularSet = CircularObj & {ref: Set<unknown>}; + + const a: CircularSet = {c: 1, ref: new Set()}; + const b: CircularSet = {c: 1, ref: new Set()}; + + a.ref.add(a); + b.ref.add(b); + expect(a).toMatchObject(b); + + b.ref.clear(); + expect(a).not.toMatchObject(b); + + b.ref.add(1); + expect(a).not.toMatchObject(b); + }); + + test('maps', () => { + type CircularMap = CircularObj & {ref: Map<string, unknown>}; + + const a: CircularMap = {c: 1, ref: new Map()}; + const b: CircularMap = {c: 1, ref: new Map()}; + + a.ref.set('innerRef', a); + b.ref.set('innerRef', b); + expect(a).toMatchObject(b); + + 
b.ref.clear(); + expect(a).not.toMatchObject(b); + + b.ref.set('innerRef', 1); + expect(a).not.toMatchObject(b); + }); +});
[Bug]: Stackoverflow in toMatchObject when matching w/ recursive object ### Version 29.7.0 ### Steps to reproduce Using `expect` 29.7.0 Here is a contrived example (real usage [here](https://github.com/GoogleChrome/lighthouse/blob/b75a9d98875c95d156811b3b85e796aa4024a82c/core/test/network-records-to-devtools-log.js#L477) / [here](https://github.com/GoogleChrome/lighthouse/pull/15640#issuecomment-1836904534)) ```js const a = { v: 1, }; const b = { v: 1, }; const c1 = { ref: [], }; c1.ref.push(c1); const c2 = { ref: [], }; c2.ref.push(c2); a.ref = c1; b.ref = c2; expect(a).toMatchObject(b); ``` ### Expected behavior Either document that recursive objects are not supported, or support them. ### Actual behavior `RangeError: Maximum call stack size exceeded` ### Additional context _No response_ ### Environment ```shell n/a ```
This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 30 days. We should start by adding a warning to the docs I guess, but it would be nice to support this As far as I've tested, the issue doesn't happen if the circular reference is not in an array nor if it's in a set. It also works if it's nested deeper in. I can take a crack at fixing this issue if no one else is working on it.
"2024-02-11T17:06:55Z"
30.0
[]
[ "e2e/__tests__/circularRefInBuiltInObj.test.ts" ]
TypeScript
[]
[]
jestjs/jest
14,980
jestjs__jest-14980
[ "14943", "14944" ]
d4d1f2b8004b1eb4bf0cf862698dd1142e13278f
diff --git a/packages/jest-core/src/runJest.ts b/packages/jest-core/src/runJest.ts --- a/packages/jest-core/src/runJest.ts +++ b/packages/jest-core/src/runJest.ts @@ -214,13 +214,21 @@ export default async function runJest({ if (globalConfig.listTests) { const testsPaths = [...new Set(allTests.map(test => test.path))]; - /* eslint-disable no-console */ + let testsListOutput; + if (globalConfig.json) { - console.log(JSON.stringify(testsPaths)); + testsListOutput = JSON.stringify(testsPaths); + } else { + testsListOutput = testsPaths.join('\n'); + } + + if (globalConfig.outputFile) { + const outputFile = path.resolve(process.cwd(), globalConfig.outputFile); + fs.writeFileSync(outputFile, testsListOutput, 'utf8'); } else { - console.log(testsPaths.join('\n')); + // eslint-disable-next-line no-console + console.log(testsListOutput); } - /* eslint-enable */ onComplete && onComplete(makeEmptyAggregatedTestResult()); return;
diff --git a/e2e/__tests__/__snapshots__/listTests.test.ts.snap b/e2e/__tests__/__snapshots__/listTests.test.ts.snap --- a/e2e/__tests__/__snapshots__/listTests.test.ts.snap +++ b/e2e/__tests__/__snapshots__/listTests.test.ts.snap @@ -1,5 +1,12 @@ // Jest Snapshot v1, https://goo.gl/fbAQLP +exports[`--listTests flag --outputFile flag causes tests to be saved in the file as JSON 1`] = `"["/MOCK_ABSOLUTE_PATH/e2e/list-tests/__tests__/dummy.test.js","/MOCK_ABSOLUTE_PATH/e2e/list-tests/__tests__/other.test.js"]"`; + +exports[`--listTests flag --outputFile flag causes tests to be saved in the file in different lines 1`] = ` +"/MOCK_ABSOLUTE_PATH/e2e/list-tests/__tests__/dummy.test.js +/MOCK_ABSOLUTE_PATH/e2e/list-tests/__tests__/other.test.js" +`; + exports[`--listTests flag causes tests to be printed in different lines 1`] = ` "/MOCK_ABSOLUTE_PATH/e2e/list-tests/__tests__/dummy.test.js /MOCK_ABSOLUTE_PATH/e2e/list-tests/__tests__/other.test.js" diff --git a/e2e/__tests__/listTests.test.ts b/e2e/__tests__/listTests.test.ts --- a/e2e/__tests__/listTests.test.ts +++ b/e2e/__tests__/listTests.test.ts @@ -6,6 +6,7 @@ */ import * as path from 'path'; +import * as fs from 'graceful-fs'; import runJest from '../runJest'; const testRootDir = path.resolve(__dirname, '..', '..'); @@ -36,4 +37,52 @@ describe('--listTests flag', () => { JSON.stringify(JSON.parse(stdout).map(normalizePaths).sort()), ).toMatchSnapshot(); }); + + describe('--outputFile flag', () => { + const outputFilePath = path.resolve('.', 'test-lists.json'); + afterAll(() => { + fs.unlinkSync(outputFilePath); + }); + it('causes tests to be saved in the file as JSON', () => { + const {exitCode, stdout} = runJest('list-tests', [ + '--listTests', + '--json', + '--outputFile', + outputFilePath, + ]); + + expect(exitCode).toBe(0); + expect(stdout).toBe(''); + + const outputFileExists = fs.existsSync(outputFilePath); + expect(outputFileExists).toBe(true); + + const outputFileContent = fs.readFileSync(outputFilePath, 
'utf8'); + expect(() => JSON.parse(outputFileContent)).not.toThrow(); + expect( + JSON.stringify( + JSON.parse(outputFileContent).map(normalizePaths).sort(), + ), + ).toMatchSnapshot(); + }); + + it('causes tests to be saved in the file in different lines', () => { + const {exitCode, stdout} = runJest('list-tests', [ + '--listTests', + '--outputFile', + outputFilePath, + ]); + + expect(exitCode).toBe(0); + expect(stdout).toBe(''); + + const outputFileExists = fs.existsSync(outputFilePath); + expect(outputFileExists).toBe(true); + + const outputFileContent = fs.readFileSync(outputFilePath, 'utf8'); + expect( + normalizePaths(outputFileContent).split('\n').sort().join('\n'), + ).toMatchSnapshot(); + }); + }); });
[Feature]: outputFile support for listTests option ### πŸš€ Feature Proposal Right now, the --listTests option will just write the list of tests to the console. My proposal is to use the --outputFile option along with the --listTests option to write the output to the file. ### Motivation We wanted to use the list of tests in the custom sequencers. This feature will help to store the list in the file so that the sequencers can access it. Note that this is just my usecase. There may be different use cases as well. ### Example Pass `--outputFile=test-lists.json` along with `--listTests` `yarn test --listTests --json --outputFile=test-lists.json` ### Pitch The listTests option is a core feature of Jest and would be great if it support --outputFile as well Support outputFile option for listTests option ## Summary Added to Feature Proposal as well -> https://github.com/jestjs/jest/issues/14943 πŸš€ Feature Proposal Right now, the --listTests option will just write the list of tests to the console. My proposal is to use the --outputFile option along with the --listTests option to write the output to the file. Motivation We wanted to use the list of tests in the custom sequencers. This feature will help to store the list in the file so that the sequencers can access it. Note that this is just my usecase. There may be different use cases as well. Example Pass --outputFile=test-lists.json along with --listTests yarn test --listTests --json --outputFile=test-lists.json
Raised Pull Request here -> https://github.com/jestjs/jest/pull/14944 <a href="https://api.easycla.lfx.linuxfoundation.org/v2/repository-provider/github/sign/35323119/15062869/14944/#/?version=2"><img src="https://s3.amazonaws.com/cla-project-logo-prod/cla-not-signed.svg" alt="CLA Not Signed" align="left" height="28" width="328"></a><br/><br /><ul><li>:white_check_mark: login: manoraj / name: Manoraj K (3493ace221bee9222de2cbdc7b9138927edc7e09, 29dc6551de167ff8ec8ec17e39ea118734db4e02, 04f2cc9f572b0451922178ad7851d45b1e503cb4)</li><li>:white_check_mark: login: SimenB / name: Simen Bekkhus (1cd4dbd5bb608e656405dec42752f752d78c8894, bb4bb87e5dac25b4816fbcc9bc3e0e7ce31ad32d, 13b15e5c8e30e9fca940de62efee2102b177e767, 74ad24339fb17d00f9edc2c9d03127620ab99b03, 5e61b726aa0ed8f21523bf5ef683f4563abc2736)</li><li><a href='https://api.easycla.lfx.linuxfoundation.org/v2/repository-provider/github/sign/35323119/15062869/14944/#/?version=2' target='_blank'>:x:</a> - login: @mkenchugonde . The commit (3d0a90d7c38d638d65e7fed25b9498a84ac7f626, d4841c1d3b3bb9c78fc323ba151e380cb45b8671, e004aac30652e7af7d859b22842687f57583c984, 9b3939dfe487611ba7f77ffd95593dd2f0169e25, fe67e6da4b017f44116933b1cac362432de04fb2) is not authorized under a signed CLA. <a href='https://api.easycla.lfx.linuxfoundation.org/v2/repository-provider/github/sign/35323119/15062869/14944/#/?version=2' target='_blank'>Please click here to be authorized</a>. For further assistance with EasyCLA, <a href='https://jira.linuxfoundation.org/servicedesk/customer/portal/4' target='_blank'>please submit a support request ticket</a>.</li></ul> ### <span aria-hidden="true">βœ…</span> Deploy Preview for *jestjs* ready! 
| Name | Link | |:-:|------------------------| |<span aria-hidden="true">πŸ”¨</span> Latest commit | 74ad24339fb17d00f9edc2c9d03127620ab99b03 | |<span aria-hidden="true">πŸ”</span> Latest deploy log | https://app.netlify.com/sites/jestjs/deploys/65f556ba501c7700086b8d91 | |<span aria-hidden="true">😎</span> Deploy Preview | [https://deploy-preview-14944--jestjs.netlify.app](https://deploy-preview-14944--jestjs.netlify.app) | |<span aria-hidden="true">πŸ“±</span> Preview on mobile | <details><summary> Toggle QR Code... </summary><br /><br />![QR Code](https://app.netlify.com/qr-code/eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1cmwiOiJodHRwczovL2RlcGxveS1wcmV2aWV3LTE0OTQ0LS1qZXN0anMubmV0bGlmeS5hcHAifQ.2DpsX0E5URg6cbBxEt83Tkf8An-k6Z0aamZt-pBebzU)<br /><br />_Use your smartphone camera to open QR code link._</details> | --- _To edit notification comments on pull requests, go to your [Netlify site configuration](https://app.netlify.com/sites/jestjs/configuration/deploys#deploy-notifications)._ @mkenchugonde hello! πŸ‘‹ could you sign the CLA? πŸ™‚ > @mkenchugonde hello! πŸ‘‹ could you sign the CLA? πŸ™‚ I am not sure how I ended up pushing commits from @mkenchugonde ( I intend to use my personal account @manoraj ). I have signed the CLA from @mkenchugonde, but it needs approval from the organization I guess. I will wait for one or two days before seeking help for next steps. If it's all you, you should be able to just rebase and change author or squash the commits
"2024-03-21T03:44:41Z"
30.0
[]
[ "e2e/__tests__/listTests.test.ts" ]
TypeScript
[]
[]
jestjs/jest
15,101
jestjs__jest-15101
[ "15025", "15091" ]
fa24a3bdd6682978d76799265016fb9d5bff135e
diff --git a/packages/expect/src/asymmetricMatchers.ts b/packages/expect/src/asymmetricMatchers.ts --- a/packages/expect/src/asymmetricMatchers.ts +++ b/packages/expect/src/asymmetricMatchers.ts @@ -136,6 +136,10 @@ class Any extends AsymmetricMatcher<any> { return typeof other == 'object'; } + if (this.sample == Array) { + return Array.isArray(other); + } + return other instanceof this.sample; } @@ -164,6 +168,10 @@ class Any extends AsymmetricMatcher<any> { return 'boolean'; } + if (Array.isArray(this.sample)) { + return 'array'; + } + return fnNameFor(this.sample); }
diff --git a/packages/expect/src/__tests__/asymmetricMatchers.test.ts b/packages/expect/src/__tests__/asymmetricMatchers.test.ts --- a/packages/expect/src/__tests__/asymmetricMatchers.test.ts +++ b/packages/expect/src/__tests__/asymmetricMatchers.test.ts @@ -6,6 +6,7 @@ * */ +import {runInNewContext} from 'node:vm'; import jestExpect from '../'; import { any, @@ -51,6 +52,7 @@ test('Any.asymmetricMatch() on primitive wrapper classes', () => { any(Boolean).asymmetricMatch(new Boolean(true)), any(BigInt).asymmetricMatch(Object(1n)), any(Symbol).asymmetricMatch(Object(Symbol())), + any(Array).asymmetricMatch(runInNewContext('[];')), /* eslint-enable */ ]) { jestExpect(test).toBe(true);
[Bug]: Native fetch Arrays Incorrectly Matched ### Version v30.0.0-alpha.3 and v29.7.0 ### Steps to reproduce 1. Create a test file with this code ```js it('matches fetch parsed Array', async () => { expect(await (await fetch('https://avatar.roblox.com/v1/users/1/outfits')).json()).toMatchObject({ data: expect.any(Array), filteredCount: expect.any(Number), total: expect.any(Number) }) }) it('matches manually parsed Array', async () => { expect(JSON.parse(await (await fetch('https://avatar.roblox.com/v1/users/1/outfits')).text())).toMatchObject({ data: expect.any(Array), filteredCount: expect.any(Number), total: expect.any(Number) }) }) ``` 2. Run Jest ### Expected behavior Jest should identify the data property in the response body parsed by fetch as an Array. ### Actual behavior Jest does not identify the data property in the response body as an Array if it is parsed by fetch, only if it is parsed using JSON.parse. <img width="356" alt="Screenshot 2024-04-12 at 8 37 47 AM" src="https://github.com/jestjs/jest/assets/29686338/ee6c9d93-8fec-4e10-831a-20812f1fa279"> <img width="338" alt="Screenshot 2024-04-12 at 8 37 30 AM" src="https://github.com/jestjs/jest/assets/29686338/a347f42c-74a2-4e07-8422-6c0d5944f9a2"> ### Additional context I am using the latest Node.js LTS version (v20.12.2) ### Environment ```shell System: OS: macOS 13.2.1 CPU: (8) arm64 Apple M1 Pro Binaries: Node: 20.12.2 - /usr/local/bin/node npm: 10.5.0 - /usr/local/bin/npm pnpm: 7.30.3 - /usr/local/bin/pnpm npmPackages: jest: ^29.7.0 => 29.7.0 ``` Fix Array matching Fixes matching type Array for arrays created in a new context. ## Summary Fixes #15025 Jest does not correctly match arrays created in different contexts. This causes it to incorrectly match arrays from HTTP requests made by native fetch, failing tests when they should be passing. ## Test plan Added a unit test to asymmetricMatchers for arrays created in a new context. 
Before: ![image](https://github.com/jestjs/jest/assets/29686338/dad491c0-79f7-49d6-b72e-00beeae14baf) After: ![image](https://github.com/jestjs/jest/assets/29686338/18fc67a4-8f9e-4527-9a12-b15d3b9add64)
This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 30 days. Still an issue. This is https://github.com/jestjs/jest/issues/2549 You can probably use `Array.from` in your test code, that might help. Not sure there's much else we can do πŸ€” We could fix the array case explicitly probably. `Array.isArray` works across realms Happy to take a PR fixing this for arrays > Happy to take a PR fixing this for arrays Where in the code is the array matching? I attempted to look for it and fix the issue but the codebase is way too complex. https://github.com/jestjs/jest/blob/7bffeb55a337cb2b3e5ff019cc0754be8dcf5883/packages/expect/src/asymmetricMatchers.ts#L110 Add a new `if (this.sample == Array) return Array.isArray(other)`. And maybe in `getExpectedType()`? <a href="https://api.easycla.lfx.linuxfoundation.org/v2/repository-provider/github/sign/35323119/15062869/15091/#/?version=2"><img src="https://s3.amazonaws.com/cla-project-logo-prod/cla-not-signed.svg" alt="CLA Not Signed" align="left" height="28" width="328"></a><br/><br /><ul><li><a href='https://api.easycla.lfx.linuxfoundation.org/v2/repository-provider/github/sign/35323119/15062869/15091/#/?version=2' target='_blank'>:x:</a> - login: @popeeyy / name: popeeyy . The commit (b791c90377f3941bf5a51364aa93ed341380f146, 7425a1517182b8db8618b23f270082051dca6135, c4e5710d153f585fd1c23b5a3236c10aab4093e2) is not authorized under a signed CLA. <a href='https://api.easycla.lfx.linuxfoundation.org/v2/repository-provider/github/sign/35323119/15062869/15091/#/?version=2' target='_blank'>Please click here to be authorized</a>. For further assistance with EasyCLA, <a href='https://jira.linuxfoundation.org/servicedesk/customer/portal/4' target='_blank'>please submit a support request ticket</a>.</li></ul><!-- Date Modified: 2024-05-27 21:52:40.491893 --> ### <span aria-hidden="true">βœ…</span> Deploy Preview for *jestjs* ready! 
Built [without sensitive environment variables](https://docs.netlify.com/configure-builds/environment-variables/#sensitive-variable-policy) | Name | Link | |:-:|------------------------| |<span aria-hidden="true">πŸ”¨</span> Latest commit | c4e5710d153f585fd1c23b5a3236c10aab4093e2 | |<span aria-hidden="true">πŸ”</span> Latest deploy log | https://app.netlify.com/sites/jestjs/deploys/665500a724c924000872565b | |<span aria-hidden="true">😎</span> Deploy Preview | [https://deploy-preview-15091--jestjs.netlify.app](https://deploy-preview-15091--jestjs.netlify.app) | |<span aria-hidden="true">πŸ“±</span> Preview on mobile | <details><summary> Toggle QR Code... </summary><br /><br />![QR Code](https://app.netlify.com/qr-code/eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1cmwiOiJodHRwczovL2RlcGxveS1wcmV2aWV3LTE1MDkxLS1qZXN0anMubmV0bGlmeS5hcHAifQ._GtRsnqzbL4pQA2AeGV1kyXZobQyMYzJwBwlVUDZfhg)<br /><br />_Use your smartphone camera to open QR code link._</details> | --- _To edit notification comments on pull requests, go to your [Netlify site configuration](https://app.netlify.com/sites/jestjs/configuration/deploys#deploy-notifications)._ Oh, can you sign the CLA? > Oh, can you sign the CLA? No. That's unfortunate, I don't think we'll be able to merge the PR in that case πŸ€” https://github.com/jestjs/jest/blob/b7ae0b85e981eae0e373d98ab2ae8fe497d90027/CONTRIBUTING.md#contributor-license-agreement-cla >In order to accept your pull request, we need you to submit a CLA. > That's unfortunate, I don't think we'll be able to merge the PR in that case πŸ€” https://github.com/jestjs/jest/blob/b7ae0b85e981eae0e373d98ab2ae8fe497d90027/CONTRIBUTING.md#contributor-license-agreement-cla > > > > >In order to accept your pull request, we need you to submit a CLA. That is quite unfortunate, but I will not be sharing my full name, address, and all the other information you should not share with random organizations to contribute a few lines of code. 
Hopefully someone else willing to give this information away will be able to fix this issue!
"2024-05-31T22:53:55Z"
30.0
[]
[ "packages/expect/src/__tests__/asymmetricMatchers.test.ts" ]
TypeScript
[]
[]
jestjs/jest
15,138
jestjs__jest-15138
[ "15137" ]
c54bccd657fb4cf060898717c09f633b4da3eec4
diff --git a/packages/pretty-format/src/plugins/DOMElement.ts b/packages/pretty-format/src/plugins/DOMElement.ts --- a/packages/pretty-format/src/plugins/DOMElement.ts +++ b/packages/pretty-format/src/plugins/DOMElement.ts @@ -30,16 +30,21 @@ const testHasAttribute = (val: any) => { } }; +const isCustomElement = (val: any) => { + const tagName = val?.tagName; + return ( + (typeof tagName === 'string' && tagName.includes('-')) || + testHasAttribute(val) + ); +}; + const testNode = (val: any) => { const constructorName = val.constructor.name; - const {nodeType, tagName} = val; - const isCustomElement = - (typeof tagName === 'string' && tagName.includes('-')) || - testHasAttribute(val); + const {nodeType} = val; return ( (nodeType === ELEMENT_NODE && - (ELEMENT_REGEXP.test(constructorName) || isCustomElement)) || + (ELEMENT_REGEXP.test(constructorName) || isCustomElement(val))) || (nodeType === TEXT_NODE && constructorName === 'Text') || (nodeType === COMMENT_NODE && constructorName === 'Comment') || (nodeType === FRAGMENT_NODE && constructorName === 'DocumentFragment') @@ -47,7 +52,7 @@ const testNode = (val: any) => { }; export const test: NewPlugin['test'] = (val: any) => - val?.constructor?.name && testNode(val); + (val?.constructor?.name || isCustomElement(val)) && testNode(val); type HandledType = Element | Text | Comment | DocumentFragment;
diff --git a/packages/pretty-format/src/__tests__/DOMElement.test.ts b/packages/pretty-format/src/__tests__/DOMElement.test.ts --- a/packages/pretty-format/src/__tests__/DOMElement.test.ts +++ b/packages/pretty-format/src/__tests__/DOMElement.test.ts @@ -357,12 +357,14 @@ Testing.`; customElements.define('custom-paragraph', CustomParagraphElement, { extends: 'p', }); + customElements.define('anonymous-element', class extends HTMLElement {}); const parent = document.createElement('div'); parent.innerHTML = [ '<custom-element></custom-element>', '<custom-extended-element></custom-extended-element>', '<p is="custom-paragraph"></p>', + '<anonymous-element></anonymous-element>', ].join(''); expect(parent).toPrettyPrintTo( @@ -373,6 +375,7 @@ Testing.`; ' <p', ' is="custom-paragraph"', ' />', + ' <anonymous-element />', '</div>', ].join('\n'), );
[Bug]: Jest fails to serialize custom elements with anonymous constructors ### Version latest ### Steps to reproduce Reproduction here: https://github.com/testing-library/dom-testing-library/issues/1191#issue-1465496571 ```js class MyElement extends HTMLElement {} window.customElements.define("my-element", class extends MyElement {}) window.customElements.get("my-element").name // => "" export default () => <my-element></my-element> test("Render app", () => { render(<App />) snapshot() }) ``` ### Expected behavior Should properly serialize a custom element to HTML tags ### Actual behavior Renders the entire JS object https://github.com/KonnorRogers/shoelace-nextjs-lazy/blob/main/app/__snapshots__/page.test.tsx.snap ### Additional context The `test` function in pretty-format expects the constructor to have a name https://github.com/jestjs/jest/blob/c54bccd657fb4cf060898717c09f633b4da3eec4/packages/pretty-format/src/plugins/DOMElement.ts#L49-L50 ```diff - export const test: NewPlugin['test'] = (val: any) => - val?.constructor?.name && testNode(val); + export const test: NewPlugin['test'] = (val: any) => + val?.constructor && testNode(val); ``` ### Environment ```shell System: OS: macOS 14.4 CPU: (8) arm64 Apple M1 Pro Binaries: Node: 20.9.0 - ~/.asdf/installs/nodejs/20.9.0/bin/node Yarn: 4.0.1 - ~/.asdf/installs/nodejs/20.9.0/bin/yarn npm: 10.1.0 - ~/.asdf/plugins/nodejs/shims/npm pnpm: 9.1.1 - ~/.asdf/installs/nodejs/20.9.0/bin/pnpm npmPackages: jest: ^29.7.0 => 29.7.0 ```
"2024-06-19T20:07:30Z"
30.0
[]
[ "packages/pretty-format/src/__tests__/DOMElement.test.ts" ]
TypeScript
[]
[]
jestjs/jest
8,359
jestjs__jest-8359
[ "8280" ]
103fb154910e8b9df1c4e88896bc90f07c3ec341
diff --git a/packages/expect-utils/src/utils.ts b/packages/expect-utils/src/utils.ts --- a/packages/expect-utils/src/utils.ts +++ b/packages/expect-utils/src/utils.ts @@ -254,6 +254,12 @@ export const iterableEquality = ( return false; } + const aEntries = Object.entries(a); + const bEntries = Object.entries(b); + if (!equals(aEntries, bEntries)) { + return false; + } + // Remove the first value from the stack of traversed values. aStack.pop(); bStack.pop();
diff --git a/packages/expect-utils/src/__tests__/utils.test.ts b/packages/expect-utils/src/__tests__/utils.test.ts --- a/packages/expect-utils/src/__tests__/utils.test.ts +++ b/packages/expect-utils/src/__tests__/utils.test.ts @@ -422,6 +422,49 @@ describe('iterableEquality', () => { ).toBe(false); }); + test('returns true when given iterator within equal objects', () => { + const a = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + a: [], + }; + const b = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + a: [], + }; + + expect(iterableEquality(a, b)).toBe(true); + }); + + test('returns false when given iterator within inequal objects', () => { + const a = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + a: [1], + }; + const b = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + a: [], + }; + + expect(iterableEquality(a, b)).toBe(false); + }); + + test('returns false when given iterator within inequal nested objects', () => { + const a = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + a: { + b: [1], + }, + }; + const b = { + [Symbol.iterator]: () => ({next: () => ({done: true})}), + a: { + b: [], + }, + }; + + expect(iterableEquality(a, b)).toBe(false); + }); + test('returns true when given circular Set shape', () => { const a1 = new Set(); const a2 = new Set();
isEqual() on iterable ignores other properties <!-- Love Jest? Please consider supporting our collective: πŸ‘‰ https://opencollective.com/jest/donate --> ## πŸ› Bug Report Testing iterable objects with isEqual() ignores other properties on the iterable ## To Reproduce ```js let next = () => ({ value: undefined, done: true }); test('iterable', () => { expect({ [Symbol.iterator]: () => ({ next }), data: ['foo'] }).toEqual({ [Symbol.iterator]: () => ({ next }), data: [] }) }); ``` Same behaviour when using the iterator of data property: ```js let data = ['foo'], data2 = []; test('foo', () => { expect({ [Symbol.iterator]: data[Symbol.iterator], data }).toEqual({ [Symbol.iterator]: data2[Symbol.iterator], data: data2 }) }); ``` both result in ``` PASS test/foo.test.js βœ“ foo (6ms) ``` ## Expected behavior Other properties are evaluated when object is iterable. Like when removing the iterators from above code: ``` FAIL test/foo.test.js βœ• foo (14ms) ● foo expect(received).toEqual(expected) Difference: - Expected + Received Object { - "data": Array [], + "data": Array [ + "foo", + ], } ``` ## Link to repl or repo (highly encouraged) https://repl.it/repls/CaringPrimarySupport ## Run `npx envinfo --preset jest` Paste the results here: ```bash npx: installed 1 in 1.537s System: OS: Linux 4.4 Ubuntu 16.04.1 LTS (Xenial Xerus) CPU: (8) x64 Intel(R) Core(TM) i7-6700HQ CPU @ 2.60GHz Binaries: Node: 8.9.1 - /usr/bin/node npm: 6.4.1 - /usr/bin/npm npmPackages: jest: ^24.7.1 => 24.7.1 ```
This reproduces for me, and it's a bug with our custom `iterableEquality`. With `Array`s, it works. I'll send a PR
"2019-04-22T09:41:40Z"
28.0
[]
[ "packages/expect-utils/src/__tests__/utils.test.ts" ]
TypeScript
[]
[]
statsmodels/statsmodels
6,643
statsmodels__statsmodels-6643
[ "6642" ]
4e77b4b07545f848fe001c24197cf49214a8c2d1
diff --git a/statsmodels/stats/diagnostic.py b/statsmodels/stats/diagnostic.py --- a/statsmodels/stats/diagnostic.py +++ b/statsmodels/stats/diagnostic.py @@ -802,7 +802,10 @@ def acorr_breusch_godfrey(res, nlags=None, store=False): nobs = xdall.shape[0] xdall = np.c_[np.ones((nobs, 1)), xdall] xshort = x[-nobs:] - exog = np.column_stack((exog_old, xdall)) + if exog_old is None: + exog = xdall + else: + exog = np.column_stack((exog_old, xdall)) k_vars = exog.shape[1] resols = OLS(xshort, exog).fit()
diff --git a/statsmodels/stats/tests/test_diagnostic.py b/statsmodels/stats/tests/test_diagnostic.py --- a/statsmodels/stats/tests/test_diagnostic.py +++ b/statsmodels/stats/tests/test_diagnostic.py @@ -28,6 +28,7 @@ from statsmodels.tools.tools import Bunch from statsmodels.tools.tools import add_constant from statsmodels.tsa.ar_model import AutoReg +from statsmodels.tsa.arima_model import ARMA cur_dir = os.path.abspath(os.path.dirname(__file__)) @@ -287,6 +288,11 @@ def test_acorr_breusch_godfrey_multidim(self): with pytest.raises(ValueError, match='Model resid must be a 1d array'): smsdia.acorr_breusch_godfrey(res) + def test_acorr_breusch_godfrey_exogs(self): + data = sunspots.load_pandas().data['SUNACTIVITY'] + res = ARMA(data, (1, 0)).fit(disp=False, trend='nc') + smsdia.acorr_breusch_godfrey(res, nlags=1) + def test_acorr_ljung_box(self): # unit-test which may be useful later
BUG: acorr_breusch_godfrey error in regressions without const or exogs In ARMA models without constant (or ARMAX without constant and without exogs) the acorr_breusch_godfrey method don't check if res.model.exog is None and try to use exogs returning a ValueError. #### Code Sample, a copy-pastable example if possible ```python from statsmodels.tsa.arima_model import ARMA import pandas as pd from statsmodels.datasets import sunspots data = sunspots.load_pandas().data['SUNACTIVITY'] res = ARMA(data, (1,0)).fit(disp=False, trend = 'nc') from statsmodels.stats.diagnostic import acorr_breusch_godfrey acorr_breusch_godfrey(res, nlags=1) ``` #### Expected Output N/A #### Output of ``import statsmodels.api as sm; sm.show_versions()`` 0.11.0
"2020-04-14T03:10:47Z"
0.12
[ "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_normality", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_error[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_cusum_ols", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box_against_r", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_nested[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_goldfeldquandt", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_cox", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_j_class", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_rainbow", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_nested[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_lr", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_j", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_recursive_residuals", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box_small_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_cox_class", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_white_error", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_arch2", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_white", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_arch", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_error[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_breusch_godfrey", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_breusch_pagan_nonrobust", 
"statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box_big_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_hac", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_influence", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_breaks_hansen", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_harvey_collier", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_breusch_pagan", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_breusch_godfrey_multidim", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_basic", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_j_class", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_error[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_breusch_godfrey_multidim", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_influence", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_recursive_residuals", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_breusch_pagan", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_j", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_lr", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_nested[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_white_error", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box_against_r", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_error[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_nested[compare_cox]", 
"statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box_big_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_white", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_cox_class", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_arch2", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_breusch_pagan_nonrobust", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_cox", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_harvey_collier", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_breusch_godfrey", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_basic", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box_small_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_hac", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_goldfeldquandt", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_arch", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_rainbow", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_cusum_ols", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_breaks_hansen", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_normality", "statsmodels/stats/tests/test_diagnostic.py::test_gq", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_exception", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_errors_warnings", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_dof_adj", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-fitted-2]", 
"statsmodels/stats/tests/test_diagnostic.py::test_influence_wrapped", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-2-False-AIC]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-0-False-BIC]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[None-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_spec_white", "statsmodels/stats/tests/test_diagnostic.py::test_encompasing_direct[HC0]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-2-False-AIC]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[x0-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-princomp-2]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by2-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_outlier_test", "statsmodels/stats/tests/test_diagnostic.py::test_encompasing_direct[nonrobust]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_centered[0.33]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-2-True-AIC]", "statsmodels/stats/tests/test_diagnostic.py::test_outlier_influence_funcs", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke_no_autolag", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-0-False-AIC]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-princomp-2]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by1-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-0-True-BIC]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-exog-2]", 
"statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-2-True-BIC]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[None-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-princomp-2]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-0-False-AIC]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_period", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-0-True-AIC]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_auto_lag_selection", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by2-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_linear_lm_direct", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-0-True-BIC]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_centered[None]", "statsmodels/stats/tests/test_diagnostic.py::test_small_skip", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-0-True-AIC]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by4-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by1-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by2-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by4-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by1-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-fitted-3]", 
"statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[None-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-0-False-BIC]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_centered[300]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by4-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-2-True-AIC]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-princomp-2]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-2-False-BIC]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_spec_white_error", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-2-False-BIC]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-2-True-BIC]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[x0-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[x0-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_influence_dtype", "statsmodels/stats/tests/test_diagnostic.py::test_encompasing_error", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-fitted-3]" ]
[ "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_breusch_godfrey_exogs", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_breusch_godfrey_exogs" ]
Python
[]
[]
statsmodels/statsmodels
6,909
statsmodels__statsmodels-6909
[ "6907" ]
b911f6be01f39b8d3da3398c00073c1ef18edf80
diff --git a/statsmodels/tsa/statespace/varmax.py b/statsmodels/tsa/statespace/varmax.py --- a/statsmodels/tsa/statespace/varmax.py +++ b/statsmodels/tsa/statespace/varmax.py @@ -936,14 +936,11 @@ def _set_final_predicted_state(self, exog, out_of_sample): Notes ----- - We need special handling for forecasting with `exog` or trend, because + We need special handling for forecasting with `exog`, because if we had these then the last predicted_state has been set to NaN since - we did not have the appropriate `exog` to create it. Since we handle - trend in the same way as `exog`, we still have this issue when only - trend is used without `exog`. + we did not have the appropriate `exog` to create it. """ - flag = out_of_sample and ( - self.model.k_exog > 0 or self.model.k_trend > 0) + flag = out_of_sample and self.model.k_exog > 0 if flag: tmp_endog = concat([
diff --git a/statsmodels/tsa/statespace/tests/test_varmax.py b/statsmodels/tsa/statespace/tests/test_varmax.py --- a/statsmodels/tsa/statespace/tests/test_varmax.py +++ b/statsmodels/tsa/statespace/tests/test_varmax.py @@ -1051,7 +1051,57 @@ def test_append_results(): res1.forecast(10, exog=np.ones(10))) -def test_extend_results(): [email protected]('trend', ['n', 'c', 'ct']) [email protected]('forecast', [True, False]) +def test_extend_results(trend, forecast): + endog = np.arange(200).reshape(100, 2) + trend_params = [] + if trend == 'c': + trend_params = [0.1, 0.2] + if trend == 'ct': + trend_params = [0.1, 0.2, 1., 2.] + params = np.r_[trend_params, + 0.5, -0.1, 0.0, 0.2, + 1., 0., 1.] + + mod1 = varmax.VARMAX(endog, order=(1, 0), trend=trend) + res1 = mod1.smooth(params) + if forecast: + # Call `forecast` to trigger the _set_final_exog and + # _set_final_predicted_state context managers + res1.forecast() + + mod2 = mod1.clone(endog[:50]) + res2 = mod2.smooth(params) + if forecast: + # Call `forecast` to trigger the _set_final_exog and + # _set_final_predicted_state context managers + res2.forecast() + res3 = res2.extend(endog[50:]) + + assert_allclose(res3.llf_obs, res1.llf_obs[50:]) + + for attr in [ + 'filtered_state', 'filtered_state_cov', 'predicted_state', + 'predicted_state_cov', 'forecasts', 'forecasts_error', + 'forecasts_error_cov', 'standardized_forecasts_error', + 'scaled_smoothed_estimator', + 'scaled_smoothed_estimator_cov', 'smoothing_error', + 'smoothed_state', + 'smoothed_state_cov', 'smoothed_state_autocov', + 'smoothed_measurement_disturbance', + 'smoothed_state_disturbance', + 'smoothed_measurement_disturbance_cov', + 'smoothed_state_disturbance_cov']: + desired = getattr(res1, attr) + if desired is not None: + desired = desired[..., 50:] + assert_allclose(getattr(res3, attr), desired, atol=1e-12) + + assert_allclose(res3.forecast(10), res1.forecast(10)) + + +def test_extend_results_exog(): endog = np.arange(200).reshape(100, 2) exog = 
np.ones(100) params = [0.1, 0.2,
VARMAXResults.extend() causes ValueError: array must not contain infs or NaNs #### Describe the bug Extending a VARMAX model with new observations throws a ValueError even though the analogous univariate extend with same syntax works for SARIMAX. #### Code Sample, a copy-pastable example if possible ```python from statsmodels.tsa.statespace.varmax import VARMAX import numpy as np np.random.seed(1) y_hist = 100*np.random.rand(50,2) model = VARMAX(endog= y_hist,order=(2,0)).fit() print("VARMAX model summary") print(model.summary()) next_y_hat = model.forecast() print("\nPredicted next value") print(next_y_hat) # simulate next observed value next_y = next_y_hat # extend model model = model.extend(endog = next_y) # ValueError: array must not contain infs or NaNs ``` <details> The analogous use of SARIMAXResults.extend() works fine: ```python from statsmodels.tsa.statespace.sarimax import SARIMAX uni_model = SARIMAX(endog=y_hist[:,1],order=(2,0,0)).fit() print("SARIMAX model summary") print(uni_model.summary()) next_y_hat_uni = uni_model.forecast() print("\nPredicted next value") print(next_y_hat_uni) # simulate next observed value next_y_uni = next_y_hat_uni # extend model uni_model = uni_model.extend(endog = next_y_uni) ``` </details> If the issue has not been resolved, please file it in the issue tracker. 
#### Expected Output `model.extend(endog = next_y)` should return a VARMAXResultsWrapper instead of ValueError #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.7.7.final.0 OS: Darwin 18.7.0 Darwin Kernel Version 18.7.0: Mon Apr 27 20:09:39 PDT 2020; root:xnu-4903.278.35~1/RELEASE_X86_64 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.11.1 (/usr/local/lib/python3.7/site-packages/statsmodels) Required Dependencies ===================== cython: 0.29.17 (/usr/local/lib/python3.7/site-packages/Cython) numpy: 1.16.3 (/usr/local/lib/python3.7/site-packages/numpy) scipy: 1.3.1 (/usr/local/lib/python3.7/site-packages/scipy) pandas: 1.0.1 (/usr/local/lib/python3.7/site-packages/pandas) dateutil: 2.8.1 (/usr/local/lib/python3.7/site-packages/dateutil) patsy: 0.5.1 (/usr/local/lib/python3.7/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.1.1 (/usr/local/lib/python3.7/site-packages/matplotlib) backend: module://ipykernel.pylab.backend_inline cvxopt: Not installed joblib: 0.13.2 (/usr/local/lib/python3.7/site-packages/joblib) Developer Tools ================ IPython: 7.13.0 (/usr/local/lib/python3.7/site-packages/IPython) jinja2: 2.11.1 (/usr/local/lib/python3.7/site-packages/jinja2) sphinx: 2.2.0 (/usr/local/lib/python3.7/site-packages/sphinx) pygments: 2.6.1 (/usr/local/lib/python3.7/site-packages/pygments) pytest: Not installed virtualenv: Not installedINSTALLED VERSIONS ------------------ Python: 3.7.7.final.0 OS: Darwin 18.7.0 Darwin Kernel Version 18.7.0: Mon Apr 27 20:09:39 PDT 2020; root:xnu-4903.278.35~1/RELEASE_X86_64 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.11.1 (/usr/local/lib/python3.7/site-packages/statsmodels) Required Dependencies ===================== cython: 0.29.17 (/usr/local/lib/python3.7/site-packages/Cython) numpy: 1.16.3 
(/usr/local/lib/python3.7/site-packages/numpy) scipy: 1.3.1 (/usr/local/lib/python3.7/site-packages/scipy) pandas: 1.0.1 (/usr/local/lib/python3.7/site-packages/pandas) dateutil: 2.8.1 (/usr/local/lib/python3.7/site-packages/dateutil) patsy: 0.5.1 (/usr/local/lib/python3.7/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.1.1 (/usr/local/lib/python3.7/site-packages/matplotlib) backend: module://ipykernel.pylab.backend_inline cvxopt: Not installed joblib: 0.13.2 (/usr/local/lib/python3.7/site-packages/joblib) Developer Tools ================ IPython: 7.13.0 (/usr/local/lib/python3.7/site-packages/IPython) jinja2: 2.11.1 (/usr/local/lib/python3.7/site-packages/jinja2) sphinx: 2.2.0 (/usr/local/lib/python3.7/site-packages/sphinx) pygments: 2.6.1 (/usr/local/lib/python3.7/site-packages/pygments) pytest: Not installed virtualenv: Not installed </details>
I also posted this as a stackoverflow question: https://stackoverflow.com/questions/63021311/varmax-results-extend-causes-valueerror-array-must-not-contain-infs-or-nans Thanks for posting this issue! It is a bug in handling `extend` with a trend. It's an easy issue to fix, but until we get a new release out there is a workaround (which admittedly is a little annoying). Instead of including a trend in the model with the `trend` argument, you can manually include it as part of `exog`. These are identical results, but the `exog` approach is a little tedious. ```python from statsmodels.tsa.statespace.varmax import VARMAX import numpy as np np.random.seed(1) y_hist = 100*np.random.rand(50,2) model = VARMAX(endog= y_hist,order=(2,0), trend='n', exog=np.ones(y_hist.shape[0])).fit() print("VARMAX model summary") print(model.summary()) next_y_hat = model.forecast(exog=[1]) print("\nPredicted next value") print(next_y_hat) # simulate next observed value next_y = next_y_hat # extend model model = model.extend(endog = next_y, exog=[1]) # ValueError: array must not contain infs or NaNs ``` Thanks for the fast and helpful response! I didn't even realize I was asking for a trend component. IIUC, With [SARIMAX](https://www.statsmodels.org/devel/generated/statsmodels.tsa.statespace.sarimax.SARIMAX.html#statsmodels.tsa.statespace.sarimax.SARIMAX), the default trend is None. So I probably wanted to do VARMAX with trend=None in the first place, but I didn't realize I had to specify that. On one hand, I get that an ARIMA model might make more sense to have a default trend of None than a non-differenced model. On the other hand, there might be something to be said for consistent defaults across statespace classes. Would you prefer to take credit for posting the answer on StackOverflow, or should I just do it with attribution?
"2020-07-22T03:49:02Z"
0.12
[ "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_forecast", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_mle", 
"statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_bse_oim", 
"statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_dynamic_predict", 
"statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_representation", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_aic", 
"statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_forecast", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::test_apply_results", "statsmodels/tsa/statespace/tests/test_varmax.py::test_misc_exog", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[False-c]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_predict_custom_index", "statsmodels/tsa/statespace/tests/test_varmax.py::test_append_results", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[False-ct]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_vma1_exog", "statsmodels/tsa/statespace/tests/test_varmax.py::test_misspecifications", "statsmodels/tsa/statespace/tests/test_varmax.py::test_forecast_exog", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results_exog", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[True-n]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_recreate_model", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[False-n]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_specifications" ]
[ "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[True-c]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[True-ct]" ]
Python
[]
[]
statsmodels/statsmodels
6,969
statsmodels__statsmodels-6969
[ "6383" ]
1c82e3ab6f35920c5d76f304931ef0cfe40ffeaf
diff --git a/statsmodels/robust/scale.py b/statsmodels/robust/scale.py --- a/statsmodels/robust/scale.py +++ b/statsmodels/robust/scale.py @@ -49,6 +49,44 @@ def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median): return np.median((np.abs(a-center)) / c, axis=axis) +def iqr(a, c=Gaussian.ppf(3/4) - Gaussian.ppf(1/4), axis=0, center=np.median): + """ + The normalized interquartile range along given axis of an array + + Parameters + ---------- + a : array_like + Input array. + c : float, optional + The normalization constant, used to get consistent estimates of the + standard deviation at the normal distribution. Defined as + scipy.stats.norm.ppf(3/4.) - scipy.stats.norm.ppf(1/4.), which is + approximately 1.349. + axis : int, optional + The default is 0. Can also be None. + center : callable or float + If a callable is provided, such as the default `np.median` then it + is expected to be called center(a). The axis argument will be applied + via np.apply_over_axes. Otherwise, provide a float. + + Returns + ------- + The normalized interquartile range + """ + a = array_like(a, 'a', ndim=None) + c = float_like(c, 'c') + + if a.size == 0: + return np.nan + else: + if callable(center) and a.size: + center = np.apply_over_axes(center, a, axis) + else: + center = 0.0 + quantiles = np.quantile(a - center, [0.25, 0.75], axis=axis) + return np.squeeze(np.diff(quantiles, axis=0) / c) + + class Huber(object): """ Huber's proposal 2 for estimating location and scale jointly.
diff --git a/statsmodels/robust/tests/test_scale.py b/statsmodels/robust/tests/test_scale.py --- a/statsmodels/robust/tests/test_scale.py +++ b/statsmodels/robust/tests/test_scale.py @@ -31,6 +31,9 @@ def test_median(self): def test_mad(self): assert_almost_equal(scale.mad(self.chem), 0.52632, DECIMAL) + def test_iqr(self): + assert_almost_equal(scale.iqr(self.chem), 0.68570, DECIMAL) + def test_huber_scale(self): assert_almost_equal(scale.huber(self.chem)[0], 3.20549, DECIMAL) @@ -98,6 +101,52 @@ def test_axisneg1(self): assert_equal(m.shape, (40, 10)) +class TestIqr(object): + @classmethod + def setup_class(cls): + np.random.seed(54321) + cls.X = standard_normal((40, 10)) + + def test_iqr(self): + m = scale.iqr(self.X) + assert_equal(m.shape, (10,)) + + def test_iqr_empty(self): + empty = np.empty(0) + assert np.isnan(scale.iqr(empty)) + empty = np.empty((10, 100, 0)) + assert_equal(scale.iqr(empty, axis=1), np.empty((10, 0))) + empty = np.empty((100, 100, 0, 0)) + assert_equal(scale.iqr(empty, axis=-1), np.empty((100, 100, 0))) + + def test_iqr_center(self): + n = scale.iqr(self.X, center=0) + assert_equal(n.shape, (10,)) + + +class TestIqrAxes(object): + @classmethod + def setup_class(cls): + np.random.seed(54321) + cls.X = standard_normal((40, 10, 30)) + + def test_axis0(self): + m = scale.iqr(self.X, axis=0) + assert_equal(m.shape, (10, 30)) + + def test_axis1(self): + m = scale.iqr(self.X, axis=1) + assert_equal(m.shape, (40, 30)) + + def test_axis2(self): + m = scale.iqr(self.X, axis=2) + assert_equal(m.shape, (40, 10)) + + def test_axisneg1(self): + m = scale.iqr(self.X, axis=-1) + assert_equal(m.shape, (40, 10)) + + class TestHuber(object): @classmethod def setup_class(cls):
ENH/BUG iqr is not scaled for normal distribution https://www.statsmodels.org/stable/_modules/statsmodels/tools/eval_measures.html#iqr computes raw IQR, I thought we have scaling to normal distribution as in `robust.scale.mad` (iqr is now also available in scipy) code search finds adjustment or usage like `iqr = (q75 - q25) / 1.349` I never remember: are mad and iqr scale for variance or standard deviation (sqrt or not) and there is a bug in axis handling !
I think we can add a by default normalized iqr based on numpy percentiles in `robust.scale` (possibly with option for other distributions.
"2020-08-14T17:42:04Z"
0.12
[ "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_default", "statsmodels/robust/tests/test_scale.py::TestHuber::test_huber_result_shape", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_location", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_huberT", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_scale", "statsmodels/robust/tests/test_scale.py::TestChem::test_mad", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_Hampel", "statsmodels/robust/tests/test_scale.py::TestChem::test_mean", "statsmodels/robust/tests/test_scale.py::TestChem::test_median", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axis0", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestMad::test_mad_center", "statsmodels/robust/tests/test_scale.py::TestMad::test_mad_empty", "statsmodels/robust/tests/test_scale.py::TestMad::test_mad" ]
[ "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axis0", "statsmodels/robust/tests/test_scale.py::TestChem::test_iqr", "statsmodels/robust/tests/test_scale.py::TestIqr::test_iqr_empty", "statsmodels/robust/tests/test_scale.py::TestIqr::test_iqr_center", "statsmodels/robust/tests/test_scale.py::TestIqr::test_iqr" ]
Python
[]
[]
statsmodels/statsmodels
7,000
statsmodels__statsmodels-7000
[ "6999" ]
1e6b7c6ef6ad534adbd59a5efccaf8646d221146
diff --git a/statsmodels/robust/scale.py b/statsmodels/robust/scale.py --- a/statsmodels/robust/scale.py +++ b/statsmodels/robust/scale.py @@ -45,10 +45,12 @@ def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median): """ a = array_like(a, 'a', ndim=None) c = float_like(c, 'c') - if callable(center) and a.size: + if not a.size: + center = 0.0 + elif callable(center): center = np.apply_over_axes(center, a, axis) else: - center = 0.0 + center = float_like(center, "center") return np.median((np.abs(a-center)) / c, axis=axis)
diff --git a/statsmodels/robust/tests/test_scale.py b/statsmodels/robust/tests/test_scale.py --- a/statsmodels/robust/tests/test_scale.py +++ b/statsmodels/robust/tests/test_scale.py @@ -5,6 +5,7 @@ import numpy as np from numpy.random import standard_normal from numpy.testing import assert_almost_equal, assert_equal +from scipy.stats import norm as Gaussian import pytest # Example from Section 5.5, Venables & Ripley (2002) @@ -81,6 +82,12 @@ def test_mad_empty(self): def test_mad_center(self): n = scale.mad(self.X, center=0) assert_equal(n.shape, (10,)) + with pytest.raises(TypeError): + scale.mad(self.X, center=None) + assert_almost_equal(scale.mad(self.X, center=1), + np.median(np.abs(self.X - 1), + axis=0)/Gaussian.ppf(3/4.), + DECIMAL) class TestMadAxes(object):
BUG: issue with how mad handles non-callable center arguments #### Describe the bug The center argument in the mad function is supposed to set the location estimate around which the mad will be computed. If center is not callable, the the function sets center to 0, and ignores the input, but the documentation states that center can also be a float. This is what the docs say mad should return: median(abs(a - center))/c. This was already mentioned in #6969. One solution is to change the docs, and add an assert that center is callable. Another possibility is to fix the line that sets center to zero if center is not callable, assert that center is float in that case and use the passed value in the computation. Either way, I'd be happy to write a PR fixing it. #### Code Sample, a copy-pastable example if possible ```python import numpy as np from statsmodels.robust.scale import mad a = np.array([0, 1, 2]) mad(a, center=-10) #1.482602218505602 mad(a, center=0) #1.482602218505602 np.median(np.abs(a - (-10))) / .6745 #16.30837657524092, this is the right answer for mad(a, center=-10) ``` <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `master`. If your problem has been fixed in an unreleased version, you might be able to use `master` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the master branch of this repository? It helps the limited resources if we know problems exist in the current master so that they do not need to check whether the code sample produces a bug in the next release. </details> If the issue has not been resolved, please file it in the issue tracker. 
#### Expected Output The output of mad(a, center=center) should be equal to np.median(np.abs(a - center)) / .6745. #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.8.2.final.0 OS: Linux 5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 2020 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: v0.12.0rc0+31.g1e6b7c6ef (/home/esmucler/Packages/statsmodels/statsmodels) Required Dependencies ===================== cython: 0.29.21 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/Cython) numpy: 1.19.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/numpy) scipy: 1.5.2 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/scipy) pandas: 1.1.0 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/pandas) dateutil: 2.8.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/dateutil) patsy: 0.5.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.3.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/matplotlib) backend: agg cvxopt: Not installed joblib: Not installed Developer Tools ================ IPython: 7.17.0 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/IPython) jinja2: 2.11.2 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/jinja2) sphinx: 3.2.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/sphinx) pygments: 2.6.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/pygments) pytest: 6.0.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/pytest) virtualenv: Not installed </details>
The code is a bug and should use the user provided center if it is not callable. It should be possible to quickfix this, so it can still go into 0.12 before Friday @bashtage
"2020-08-26T21:28:17Z"
0.12
[ "statsmodels/robust/tests/test_scale.py::TestMad::test_mad", "statsmodels/robust/tests/test_scale.py::TestMad::test_mad_empty", "statsmodels/robust/tests/test_scale.py::TestQn::test_qn_empty", "statsmodels/robust/tests/test_scale.py::TestQn::test_qn_naive", "statsmodels/robust/tests/test_scale.py::TestQn::test_qn_robustbase", "statsmodels/robust/tests/test_scale.py::TestQnAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestQnAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestQnAxes::test_axis0", "statsmodels/robust/tests/test_scale.py::TestQnAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axis0", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestChem::test_mad", "statsmodels/robust/tests/test_scale.py::TestChem::test_qn", "statsmodels/robust/tests/test_scale.py::TestChem::test_mean", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_huberT", "statsmodels/robust/tests/test_scale.py::TestChem::test_median", "statsmodels/robust/tests/test_scale.py::TestChem::test_iqr", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_location", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_Hampel", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_scale", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_default", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestHuber::test_huber_result_shape", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axis0", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axis1", 
"statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestIqr::test_iqr", "statsmodels/robust/tests/test_scale.py::TestIqr::test_iqr_empty" ]
[ "statsmodels/robust/tests/test_scale.py::TestMad::test_mad_center" ]
Python
[]
[]
statsmodels/statsmodels
7,002
statsmodels__statsmodels-7002
[ "6988" ]
1e6b7c6ef6ad534adbd59a5efccaf8646d221146
diff --git a/statsmodels/nonparametric/kde.py b/statsmodels/nonparametric/kde.py --- a/statsmodels/nonparametric/kde.py +++ b/statsmodels/nonparametric/kde.py @@ -13,30 +13,37 @@ """ import numpy as np from scipy import integrate, stats + from statsmodels.sandbox.nonparametric import kernels from statsmodels.tools.decorators import cache_readonly -from statsmodels.tools.validation import array_like +from statsmodels.tools.validation import array_like, float_like + from . import bandwidths -from .kdetools import (forrt, revrt, silverman_transform) +from .kdetools import forrt, revrt, silverman_transform from .linbin import fast_linbin -#### Kernels Switch for estimators #### +# Kernels Switch for estimators + +kernel_switch = dict( + gau=kernels.Gaussian, + epa=kernels.Epanechnikov, + uni=kernels.Uniform, + tri=kernels.Triangular, + biw=kernels.Biweight, + triw=kernels.Triweight, + cos=kernels.Cosine, + cos2=kernels.Cosine2, +) -kernel_switch = dict(gau=kernels.Gaussian, epa=kernels.Epanechnikov, - uni=kernels.Uniform, tri=kernels.Triangular, - biw=kernels.Biweight, triw=kernels.Triweight, - cos=kernels.Cosine, cos2=kernels.Cosine2) def _checkisfit(self): try: self.density - except: + except Exception: raise ValueError("Call fit to fit the density first") -#### Kernel Density Estimator Class ### - - +# Kernel Density Estimator Class class KDEUnivariate(object): """ Univariate Kernel Density Estimator. @@ -77,8 +84,17 @@ class KDEUnivariate(object): def __init__(self, endog): self.endog = array_like(endog, "endog", ndim=1, contiguous=True) - def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None, - gridsize=None, adjust=1, cut=3, clip=(-np.inf, np.inf)): + def fit( + self, + kernel="gau", + bw="normal_reference", + fft=True, + weights=None, + gridsize=None, + adjust=1, + cut=3, + clip=(-np.inf, np.inf), + ): """ Attach the density estimate to the KDEUnivariate class. 
@@ -95,17 +111,23 @@ def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None, - "triw" for triweight - "uni" for uniform - bw : str, float + bw : str, float, callable The bandwidth to use. Choices are: - "scott" - 1.059 * A * nobs ** (-1/5.), where A is - `min(std(X),IQR/1.34)` + `min(std(x),IQR/1.34)` - "silverman" - .9 * A * nobs ** (-1/5.), where A is - `min(std(X),IQR/1.34)` + `min(std(x),IQR/1.34)` - "normal_reference" - C * A * nobs ** (-1/5.), where C is calculated from the kernel. Equivalent (up to 2 dp) to the "scott" bandwidth for gaussian kernels. See bandwidths.py - - If a float is given, it is the bandwidth. + - If a float is given, its value is used as the bandwidth. + - If a callable is given, it's return value is used. + The callable should take exactly two parameters, i.e., + fn(x, kern), and return a float, where: + + * x - the clipped input data + * kern - the kernel instance used fft : bool Whether or not to use FFT. FFT implementation is more @@ -113,11 +135,11 @@ def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None, is implemented. If FFT is False, then a 'nobs' x 'gridsize' intermediate array is created. gridsize : int - If gridsize is None, max(len(X), 50) is used. + If gridsize is None, max(len(x), 50) is used. cut : float Defines the length of the grid past the lowest and highest values - of X so that the kernel goes to zero. The end points are - -/+ cut*bw*{min(X) or max(X)} + of x so that the kernel goes to zero. The end points are + -/+ cut*bw*{min(x) or max(x)} adjust : float An adjustment factor for the bw. Bandwidth becomes bw * adjust. 
@@ -126,11 +148,13 @@ def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None, KDEUnivariate The instance fit, """ - try: - bw = float(bw) - self.bw_method = "user-given" - except: + if isinstance(bw, str): self.bw_method = bw + else: + self.bw_method = "user-given" + if not callable(bw): + bw = float_like(bw, "bw") + endog = self.endog if fft: @@ -140,18 +164,32 @@ def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None, if weights is not None: msg = "Weights are not implemented for fft" raise NotImplementedError(msg) - density, grid, bw = kdensityfft(endog, kernel=kernel, bw=bw, - adjust=adjust, weights=weights, gridsize=gridsize, - clip=clip, cut=cut) + density, grid, bw = kdensityfft( + endog, + kernel=kernel, + bw=bw, + adjust=adjust, + weights=weights, + gridsize=gridsize, + clip=clip, + cut=cut, + ) else: - density, grid, bw = kdensity(endog, kernel=kernel, bw=bw, - adjust=adjust, weights=weights, gridsize=gridsize, - clip=clip, cut=cut) + density, grid, bw = kdensity( + endog, + kernel=kernel, + bw=bw, + adjust=adjust, + weights=weights, + gridsize=gridsize, + clip=clip, + cut=cut, + ) self.density = density self.support = grid self.bw = bw - self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice, - # should this passed to funcs? + self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice, + # should this passed to funcs? 
# put here to ensure empty cache after re-fit with new options self.kernel.weights = weights if weights is not None: @@ -170,18 +208,22 @@ def cdf(self): """ _checkisfit(self) kern = self.kernel - if kern.domain is None: # TODO: test for grid point at domain bound - a,b = -np.inf,np.inf + if kern.domain is None: # TODO: test for grid point at domain bound + a, b = -np.inf, np.inf else: - a,b = kern.domain - func = lambda x,s: kern.density(s,x) + a, b = kern.domain + + def func(x, s): + return kern.density(s, x) support = self.support - support = np.r_[a,support] + support = np.r_[a, support] gridsize = len(support) endog = self.endog - probs = [integrate.quad(func, support[i - 1], support[i], - args=endog)[0] for i in range(1, gridsize)] + probs = [ + integrate.quad(func, support[i - 1], support[i], args=endog)[0] + for i in range(1, gridsize) + ] return np.cumsum(probs) @cache_readonly @@ -220,9 +262,9 @@ def entropy(self): """ _checkisfit(self) - def entr(x,s): - pdf = kern.density(s,x) - return pdf*np.log(pdf+1e-12) + def entr(x, s): + pdf = kern.density(s, x) + return pdf * np.log(pdf + 1e-12) kern = self.kernel @@ -231,7 +273,7 @@ def entr(x,s): else: a, b = -np.inf, np.inf endog = self.endog - #TODO: below could run into integr problems, cf. stats.dist._entropy + # TODO: below could run into integr problems, cf. stats.dist._entropy return -integrate.quad(entr, a, b, args=(endog,))[0] @cache_readonly @@ -261,16 +303,24 @@ def evaluate(self, point): return self.kernel.density(self.endog, point) -#### Kernel Density Estimator Functions #### - -def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None, - adjust=1, clip=(-np.inf, np.inf), cut=3, retgrid=True): +# Kernel Density Estimator Functions +def kdensity( + x, + kernel="gau", + bw="normal_reference", + weights=None, + gridsize=None, + adjust=1, + clip=(-np.inf, np.inf), + cut=3, + retgrid=True, +): """ Rosenblatt-Parzen univariate kernel density estimator. 
Parameters ---------- - X : array_like + x : array_like The variable for which the density estimate is desired. kernel : str The Kernel to be used. Choices are @@ -281,24 +331,38 @@ def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None - "tri" for triangular - "triw" for triweight - "uni" for uniform - bw : str, float - "scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34) - "silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34) - If a float is given, it is the bandwidth. + bw : str, float, callable + The bandwidth to use. Choices are: + + - "scott" - 1.059 * A * nobs ** (-1/5.), where A is + `min(std(x),IQR/1.34)` + - "silverman" - .9 * A * nobs ** (-1/5.), where A is + `min(std(x),IQR/1.34)` + - "normal_reference" - C * A * nobs ** (-1/5.), where C is + calculated from the kernel. Equivalent (up to 2 dp) to the + "scott" bandwidth for gaussian kernels. See bandwidths.py + - If a float is given, its value is used as the bandwidth. + - If a callable is given, it's return value is used. + The callable should take exactly two parameters, i.e., + fn(x, kern), and return a float, where: + + * x - the clipped input data + * kern - the kernel instance used + weights : array or None - Optional weights. If the X value is clipped, then this weight is + Optional weights. If the x value is clipped, then this weight is also dropped. gridsize : int - If gridsize is None, max(len(X), 50) is used. + If gridsize is None, max(len(x), 50) is used. adjust : float An adjustment factor for the bw. Bandwidth becomes bw * adjust. clip : tuple - Observations in X that are outside of the range given by clip are - dropped. The number of observations in X is then shortened. + Observations in x that are outside of the range given by clip are + dropped. The number of observations in x is then shortened. 
cut : float - Defines the length of the grid past the lowest and highest values of X + Defines the length of the grid past the lowest and highest values of x so that the kernel goes to zero. The end points are - -/+ cut*bw*{min(X) or max(X)} + -/+ cut*bw*{min(x) or max(x)} retgrid : bool Whether or not to return the grid over which the density is estimated. @@ -314,16 +378,16 @@ def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more computationally efficient version. """ - X = np.asarray(X) - if X.ndim == 1: - X = X[:, None] - clip_x = np.logical_and(X > clip[0], X < clip[1]) - X = X[clip_x] + x = np.asarray(x) + if x.ndim == 1: + x = x[:, None] + clip_x = np.logical_and(x > clip[0], x < clip[1]) + x = x[clip_x] - nobs = len(X) # after trim + nobs = len(x) # after trim if gridsize is None: - gridsize = max(nobs,50) # do not need to resize if no FFT + gridsize = max(nobs, 50) # do not need to resize if no FFT # handle weights if weights is None: @@ -334,7 +398,7 @@ def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None weights = np.asarray(weights) if len(weights) != len(clip_x): - msg = "The length of the weights must be the same as the given X." + msg = "The length of the weights must be the same as the given x." raise ValueError(msg) weights = weights[clip_x.squeeze()] q = weights.sum() @@ -342,48 +406,66 @@ def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None # Get kernel object corresponding to selection kern = kernel_switch[kernel]() - # if bw is None, select optimal bandwidth for kernel - try: - bw = float(bw) - except: - bw = bandwidths.select_bandwidth(X, bw, kern) + if callable(bw): + bw = float(bw(x, kern)) + # user passed a callable custom bandwidth function + elif isinstance(bw, str): + bw = bandwidths.select_bandwidth(x, bw, kern) + # will cross-val fit this pattern? 
+ else: + bw = float_like(bw, "bw") + bw *= adjust - a = np.min(X, axis=0) - cut * bw - b = np.max(X, axis=0) + cut * bw + a = np.min(x, axis=0) - cut * bw + b = np.max(x, axis=0) + cut * bw grid = np.linspace(a, b, gridsize) - k = (X.T - grid[:, None])/bw # uses broadcasting to make a gridsize x nobs + k = ( + x.T - grid[:, None] + ) / bw # uses broadcasting to make a gridsize x nobs # set kernel bandwidth kern.seth(bw) # truncate to domain - if kern.domain is not None: # will not work for piecewise kernels like parzen + if ( + kern.domain is not None + ): # will not work for piecewise kernels like parzen z_lo, z_high = kern.domain domain_mask = (k < z_lo) | (k > z_high) - k = kern(k) # estimate density + k = kern(k) # estimate density k[domain_mask] = 0 else: - k = kern(k) # estimate density + k = kern(k) # estimate density - k[k < 0] = 0 # get rid of any negative values, do we need this? + k[k < 0] = 0 # get rid of any negative values, do we need this? - dens = np.dot(k, weights)/(q*bw) + dens = np.dot(k, weights) / (q * bw) if retgrid: return dens, grid, bw else: return dens, bw -def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None, - adjust=1, clip=(-np.inf, np.inf), cut=3, retgrid=True): + +def kdensityfft( + x, + kernel="gau", + bw="normal_reference", + weights=None, + gridsize=None, + adjust=1, + clip=(-np.inf, np.inf), + cut=3, + retgrid=True, +): """ Rosenblatt-Parzen univariate kernel density estimator Parameters ---------- - X : array_like + x : array_like The variable for which the density estimate is desired. kernel : str ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED. 
@@ -395,26 +477,40 @@ def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=N "par" for Parzen "rect" for rectangular "tri" for triangular - bw : str, float - "scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34) - "silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34) - If a float is given, it is the bandwidth. + bw : str, float, callable + The bandwidth to use. Choices are: + + - "scott" - 1.059 * A * nobs ** (-1/5.), where A is + `min(std(x),IQR/1.34)` + - "silverman" - .9 * A * nobs ** (-1/5.), where A is + `min(std(x),IQR/1.34)` + - "normal_reference" - C * A * nobs ** (-1/5.), where C is + calculated from the kernel. Equivalent (up to 2 dp) to the + "scott" bandwidth for gaussian kernels. See bandwidths.py + - If a float is given, its value is used as the bandwidth. + - If a callable is given, it's return value is used. + The callable should take exactly two parameters, i.e., + fn(x, kern), and return a float, where: + + * x - the clipped input data + * kern - the kernel instance used + weights : array or None WEIGHTS ARE NOT CURRENTLY IMPLEMENTED. - Optional weights. If the X value is clipped, then this weight is + Optional weights. If the x value is clipped, then this weight is also dropped. gridsize : int - If gridsize is None, min(len(X), 512) is used. Note that the provided + If gridsize is None, min(len(x), 512) is used. Note that the provided number is rounded up to the next highest power of 2. adjust : float An adjustment factor for the bw. Bandwidth becomes bw * adjust. clip : tuple - Observations in X that are outside of the range given by clip are - dropped. The number of observations in X is then shortened. + Observations in x that are outside of the range given by clip are + dropped. The number of observations in x is then shortened. 
cut : float - Defines the length of the grid past the lowest and highest values of X + Defines the length of the grid past the lowest and highest values of x so that the kernel goes to zero. The end points are - -/+ cut*bw*{X.min() or X.max()} + -/+ cut*bw*{x.min() or x.max()} retgrid : bool Whether or not to return the grid over which the density is estimated. @@ -446,48 +542,54 @@ def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=N the Fast Fourier Transform. Journal of the Royal Statistical Society. Series C. 31.2, 93-9. """ - X = np.asarray(X) - X = X[np.logical_and(X > clip[0], X < clip[1])] # will not work for two columns. - # will affect underlying data? + x = np.asarray(x) + # will not work for two columns. + x = x[np.logical_and(x > clip[0], x < clip[1])] # Get kernel object corresponding to selection kern = kernel_switch[kernel]() - try: - bw = float(bw) - except: - bw = bandwidths.select_bandwidth(X, bw, kern) # will cross-val fit this pattern? + if callable(bw): + bw = float(bw(x, kern)) + # user passed a callable custom bandwidth function + elif isinstance(bw, str): + # if bw is None, select optimal bandwidth for kernel + bw = bandwidths.select_bandwidth(x, bw, kern) + # will cross-val fit this pattern? + else: + bw = float_like(bw, "bw") + bw *= adjust - nobs = len(X) # after trim + nobs = len(x) # after trim # 1 Make grid and discretize the data if gridsize is None: - gridsize = np.max((nobs, 512.)) - gridsize = 2**np.ceil(np.log2(gridsize)) # round to next power of 2 + gridsize = np.max((nobs, 512.0)) + gridsize = 2 ** np.ceil(np.log2(gridsize)) # round to next power of 2 - a = np.min(X) - cut * bw - b = np.max(X) + cut * bw - grid,delta = np.linspace(a, b, int(gridsize), retstep=True) + a = np.min(x) - cut * bw + b = np.max(x) + cut * bw + grid, delta = np.linspace(a, b, int(gridsize), retstep=True) RANGE = b - a -#TODO: Fix this? 
-# This is the Silverman binning function, but I believe it's buggy (SS) -# weighting according to Silverman -# count = counts(X,grid) -# binned = np.zeros_like(grid) #xi_{k} in Silverman -# j = 0 -# for k in range(int(gridsize-1)): -# if count[k]>0: # there are points of X in the grid here -# Xingrid = X[j:j+count[k]] # get all these points -# # get weights at grid[k],grid[k+1] -# binned[k] += np.sum(grid[k+1]-Xingrid) -# binned[k+1] += np.sum(Xingrid-grid[k]) -# j += count[k] -# binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta - -#NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING - binned = fast_linbin(X, a, b, gridsize) / (delta * nobs) + # TODO: Fix this? + # This is the Silverman binning function, but I believe it's buggy (SS) + # weighting according to Silverman + # count = counts(x,grid) + # binned = np.zeros_like(grid) #xi_{k} in Silverman + # j = 0 + # for k in range(int(gridsize-1)): + # if count[k]>0: # there are points of x in the grid here + # Xingrid = x[j:j+count[k]] # get all these points + # # get weights at grid[k],grid[k+1] + # binned[k] += np.sum(grid[k+1]-Xingrid) + # binned[k+1] += np.sum(Xingrid-grid[k]) + # j += count[k] + # binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta + + # NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING + binned = fast_linbin(x, a, b, gridsize) / (delta * nobs) # step 2 compute FFT of the weights, using Munro (1976) FFT convention y = forrt(binned) @@ -495,10 +597,11 @@ def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=N # step 3 and 4 for optimal bw compute zstar and the density estimate f # do not have to redo the above if just changing bw, ie., for cross val -#NOTE: silverman_transform is the closed form solution of the FFT of the -#gaussian kernel. Not yet sure how to generalize it. 
- zstar = silverman_transform(bw, gridsize, RANGE)*y # 3.49 in Silverman - # 3.50 w Gaussian kernel + # NOTE: silverman_transform is the closed form solution of the FFT of the + # gaussian kernel. Not yet sure how to generalize it. + zstar = silverman_transform(bw, gridsize, RANGE) * y + # 3.49 in Silverman + # 3.50 w Gaussian kernel f = revrt(zstar) if retgrid: return f, grid, bw
diff --git a/statsmodels/nonparametric/tests/test_kde.py b/statsmodels/nonparametric/tests/test_kde.py --- a/statsmodels/nonparametric/tests/test_kde.py +++ b/statsmodels/nonparametric/tests/test_kde.py @@ -8,6 +8,7 @@ from statsmodels.distributions.mixture_rvs import mixture_rvs from statsmodels.nonparametric.kde import KDEUnivariate as KDE import statsmodels.sandbox.nonparametric.kernels as kernels +import statsmodels.nonparametric.bandwidths as bandwidths # get results from Stata @@ -348,3 +349,42 @@ def test_fit_self(reset_randomstate): kde = KDE(x) assert isinstance(kde, KDE) assert isinstance(kde.fit(), KDE) + + +class TestKDECustomBandwidth(object): + decimal_density = 7 + + @classmethod + def setup_class(cls): + cls.kde = KDE(Xi) + cls.weights_200 = np.linspace(1, 100, 200) + cls.weights_100 = np.linspace(1, 100, 100) + + def test_check_is_fit_ok_with_custom_bandwidth(self): + def custom_bw(X, kern): + return np.std(X) * len(X) + kde = self.kde.fit(bw=custom_bw) + assert isinstance(kde, KDE) + + def test_check_is_fit_ok_with_standard_custom_bandwidth(self): + # Note, we are passing the function, not the string - this is intended + kde = self.kde.fit(bw=bandwidths.bw_silverman) + s1 = kde.support.copy() + d1 = kde.density.copy() + + kde = self.kde.fit(bw='silverman') + + npt.assert_almost_equal(s1, kde.support, self.decimal_density) + npt.assert_almost_equal(d1, kde.density, self.decimal_density) + + @pytest.mark.parametrize("fft", [True, False]) + def test_check_is_fit_ok_with_float_bandwidth(self, fft): + # Note, we are passing the function, not the string - this is intended + kde = self.kde.fit(bw=bandwidths.bw_silverman, fft=fft) + s1 = kde.support.copy() + d1 = kde.density.copy() + + kde = self.kde.fit(bw=kde.bw, fft=fft) + + npt.assert_almost_equal(s1, kde.support, self.decimal_density) + npt.assert_almost_equal(d1, kde.density, self.decimal_density)
ENH: Let users set KDE bandwidth use a user-defined bandwidth function #### Describe the bug Unexpected exception raised for KDE fit() when input data has exactly zero bandwidth. #### Code Sample (copy-paste works) ```python from statsmodels.nonparametric.kde import KDEUnivariate import numpy as np kde = KDEUnivariate(np.array([0.1, 0.1])) kde.fit() ``` <details> I did have checked wether there is a similar open bug and couldn't find one. I did look at the source code on master, but did not run it and seems the behavior is there as well. </details> #### Expected Output If running statsmodels in a pipeline then the user data might be anything, and would expect it handles this gracefully instead of raising an exception. This would allow the user to see a nice graph in eg seaborn instead of an error for a very simple case. ```python In [1]: print(kde.density) Out[1]: array([ ... ]) In [1]: print(kde.support) Out[1]: array([ ... ]) ``` Also behavior of `[0.1, 0.1]` should be very similar to `[0.1, 0.1000000000001]` (consider details points 1, 2, 3 below): ```python from statsmodels.nonparametric.kde import KDEUnivariate import numpy as np kde = KDEUnivariate(np.array([0.1, 0.1000000000001])) kde.fit() ``` <details> 1. The == 0 in `nonparametric/bandwidths.py:171 select_bandwidth()` comparing with a float is not kosher numerical methods practice, usually a comparison using a very small error term `eps` should be done. 2. The silverman bandwidth estimation might seem to be correct (== 0) in a situation where two observations that are the same. The bandwidth of a gaussian is actually not zero, but also not trivial to calculate. Closest reference I could find is [this](https://math.stackexchange.com/questions/2693800/probability-that-two-consecutively-generated-integers-with-normal-distribution-a). 
Net, in this case, the standard deviation estimator `std()` is not the best linear unbiased estimator for the standard deviation due to low number of samples and the uncertainty `eps`. Based on this approach, I would suggest to use in this case `A == max(min(std(X), IQR/1.34), eps * scipy.stats.norm.ppf(q=1-eps/mu))` for input to the silverman or scott bandwidth estimators. In case `eps` is not specified just use 90% confidence directly, however the user should be allowed to specify it as well. The logic is quite simple: the uncertainty `eps` when compared to the average `mu` gives a certain confidence level (90% in my example if we take `eps=0.01` ie one digit lower) which translates to an equivalent standard deviation based on that uncertainty ie `1.21 * eps`. TL;DR The `eps` acts as a lower bound to standard deviation for cases with low number of observations. 3. While raising an exception in this case might seem pythonic, it's still not warranted, as one could still have a result out of this using a fitted Gaussian with `avg=0.1` and `std=0.012815515655446004` (based on point 2 above). Think about what a normal user has to do: handle exception by monkey patching the kde results so it can be used further down the code. 
</details> #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.8.5.final.0 OS: Linux 5.7.16-200.fc32.x86_64 #1 SMP Wed Aug 19 16:58:53 UTC 2020 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.11.0 (/usr/lib64/python3.8/site-packages/statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.18.4 (/usr/lib64/python3.8/site-packages/numpy) scipy: 1.4.1 (/usr/lib64/python3.8/site-packages/scipy) pandas: 0.25.3 (/usr/lib64/python3.8/site-packages/pandas) dateutil: 2.8.0 (/usr/lib/python3.8/site-packages/dateutil) patsy: 0.5.1 (/usr/lib/python3.8/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.2.2 (/usr/lib64/python3.8/site-packages/matplotlib) backend: Qt5Agg cvxopt: Not installed joblib: 0.13.2 (/usr/lib/python3.8/site-packages/joblib) Developer Tools ================ IPython: 7.12.0 (/usr/lib/python3.8/site-packages/IPython) jinja2: 2.11.2 (/usr/lib/python3.8/site-packages/jinja2) sphinx: 2.2.2 (/usr/lib/python3.8/site-packages/sphinx) pygments: 2.4.2 (/usr/lib/python3.8/site-packages/pygments) pytest: 4.6.11 (/usr/lib/python3.8/site-packages) virtualenv: Not installed </details>
In master you get ``` RuntimeError: Selected KDE bandwidth is 0. Cannot estimate density. Either provide the bandwidth during initialization or use an alternative method. ``` This is the expected outcome. It is then up to downstream users to try catch if they think they might pass unsuitable data. All you have to do is ``` kde = KDEUnivariate(np.array([0.1, 0.1])) try: kde.fit() except RuntimeError: kde.fit(bw=MY_PERSONAL_FAVORITE_BANDWIDTH_ESTIMATOR) ``` Thanks for taking the time to look at it. That is exactly what I am doing, however the behavior is not consistent. Even in that case, one would raise an Exception and get the other bw estimator. In the normal case I would go with the default bw resulting in numerically different results for numerically close inputs. My point is `KDEUnivariate(np.array([0.1, 0.1000000000001]))` and `KDEUnivariate(np.array([0.1, 0.1]))` should provide similar results. > Even in that case, one would raise an Exception and get the other bw estimator. We try to not do magic things; if the function call asks for something it should either succeed, or if it cannot succeed, raise a meaningful error. This is why we will never override the option provided. > My point is `KDEUnivariate(np.array([0.1, 0.1000000000001]))` and `KDEUnivariate(np.array([0.1, 0.1]))` should provide similar results. Those arrays are numerically different within the precision of any double precision IEEE floating point compliant computer, so I don't see why they should provide the same behavior. ``` a = np.array([0.1, 0.1]) b = np.array([0.1, 0.1000000000001]) a==b Out[5]: array([ True, False]) ``` There are many other places where the output is not continuous as values approach numeric limits, for example, anything that performs a matrix rank check and raises on singular arrays will be discontinuous local to the tolerance of the array's eigenvalues. ### Ways forwards 1. 
IMO a better idea would be to let `bw` accept a `Callable[[ndarray], float]` so that a user could provide their favorite BW function that handles edges in the way they want rather than some ad-hoc approach. 2. Add a new string method that implements some minimum bandwitdh. As to your original suggestion, ``` A = max(min(std(X), IQR/1.34), eps * scipy.stats.norm.ppf(q=1-eps/mu)) ``` this is problematic if `mu` is even modestly large, in which case you have `eps*inf`. In general, it is hard to set an absolute lower bound since KDE should work reasonably well for both ``` x = np.random.standard_normal(1000) kde = KDEUnivariate(x) # OMG, why doesn't fit return self?!?!? kde.fit() eps = np.finfo(float).eps kde2 = KDEUnivariate(eps*x) kde2.fit() print(kde2.bw / kde.bw) kde2.bw / kde.bw Out[14]: 2.220446049250313e-16 print(eps) Out[15]: 2.220446049250313e-16 ``` Excellent point with the eps / mu division. The suggestions for 1 & 2 seem right initially, here is my input: 1. callable would be super flexible - this would be my main go-to, but again, what would prevent someone from doing even today this ```python bw = callable(X) kde.fit(bw=bw) ``` 2. 'min_normal_reference', 'min_silverman' and 'min_scott' might be confusing to people, and not sure everyone has the need of a min-bandwith estimator. It would work though, I agree. I did the most sensible thing in this case and got the book from Silverman and looked it up. He does mention that the approximation is only valid in asymptotic cases ref p. 103-104 (I transcribe it here under fair use) in context of Nearest-Neighbour. I beleive his statement is as equally valid for the normal "rule of thumb" case also from p. 45. > If we presume that the likely values of f" are proportional to those of f (i.e. lower values of f" in the tails) then (5.9) suggests that the ideal local bandwidth will be proportional to f^(-1/5). This is a rather heuristic argument depending on asymptotic calculations and possibly rash assumptions. 
I hope I'm not that rusty, but for Gaussian: ![formula](https://render.githubusercontent.com/render/math?math=\Large%20f''(x)=\frac{(x-\mu)^2-\sigma^2}{\sigma^4}f(x)) Considering ![formula](https://render.githubusercontent.com/render/math?math=\Large%20f~\frac{1}{\sigma}) then that would make the bandwidth: ![formula](https://render.githubusercontent.com/render/math?math=\Large%20h~\lvert%20\frac{(x-\mu)^2-\sigma^2}{\sigma^4}%20\rvert^{-\frac{2}{5}}\frac{1}{\sigma^{\frac{1}{5}}}) Note that in his p. 45 exposition he considers the integral of ```f''^2``` which is then asymptotic (tails at -inf and +inf), as such he only uses the last part of the term with sigma to the 5th power. The key is in the first part as well in my opinion. So for asymptotic cases ```x >> mu``` (Silverman's approximation from p. 45): ![formula](https://render.githubusercontent.com/render/math?math=\Large%20h~\sigma^{\frac{7}{5}}~\sigma^1.4\approx\sigma) For non-asymptotic cases ```x ~ mu```: ![formula](https://render.githubusercontent.com/render/math?math=\Large%20h~\sigma^{\frac{3}{5}}~\sigma^0.6) On top, the expansion from the exponential part of ```f``` comes into play which for the non-asymptotic case we can aproximate easily around zero as ```1-0.5*(x-mu)^2/sigma^2``` so another -2 to the exponent for our initial calculation. This would bring finally the non-asymptotic case ![formula](https://render.githubusercontent.com/render/math?math=\Large%20h~A\sigma^{-1.4}%2BB\sigma^{0.6}) which is exactly the reverse of the asymptotic case for the first case plus some term. Net, we also need to distinguish between [0.1, 0.1] and [0.1, ... 0.1] with 200 elements. The former one would have a high bandwidth while the latter one would have a very small bandwidth but not zero. Based on this I would propose to implement a new string bandwidth for 'non_asymptotic' as suggested in proposal 2 with the approach above. Need to figure out the constants that go in front. 
What I like about the new 'non_asymptotic' case is that if you would have an array with two values which are quite far apart but you know are coming from a normal distribution then the bandwidth will be quite big, which is what one would expect instead of getting some graph with two bumps.
"2020-08-27T07:31:52Z"
0.12
[ "statsmodels/nonparametric/tests/test_kde.py::TestKDEWGauss::test_compare", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWGauss::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWGauss::test_kernel_constants", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWCos2::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWCos2::test_kernel_constants", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWCos2::test_compare", "statsmodels/nonparametric/tests/test_kde.py::TestKDEBiweight::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::TestKDEBiweight::test_density", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWCos::test_kernel_constants", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWCos::test_compare", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWEpa::test_kernel_constants", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWEpa::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWEpa::test_compare", "statsmodels/nonparametric/tests/test_kde.py::TestKDETriangular::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::TestKDETriangular::test_density", "statsmodels/nonparametric/tests/test_kde.py::TestKDEExceptions::test_non_gaussian_fft_exception", "statsmodels/nonparametric/tests/test_kde.py::TestKDEExceptions::test_check_is_fit_exception", "statsmodels/nonparametric/tests/test_kde.py::TestKDEExceptions::test_wrong_weight_length_exception", "statsmodels/nonparametric/tests/test_kde.py::TestKDEExceptions::test_non_weighted_fft_exception", "statsmodels/nonparametric/tests/test_kde.py::TestKDEGaussFFT::test_density", "statsmodels/nonparametric/tests/test_kde.py::TestKDEGaussFFT::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::TestKdeWeights::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::TestKdeWeights::test_density", "statsmodels/nonparametric/tests/test_kde.py::TestKDEEpanechnikov::test_evaluate", 
"statsmodels/nonparametric/tests/test_kde.py::TestKDEEpanechnikov::test_density", "statsmodels/nonparametric/tests/test_kde.py::TestNormConstant::test_norm_constant_calculation", "statsmodels/nonparametric/tests/test_kde.py::TestKDEGauss::test_density", "statsmodels/nonparametric/tests/test_kde.py::TestKDEGauss::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::TestKDEGauss::test_support_gridded", "statsmodels/nonparametric/tests/test_kde.py::TestKDEGauss::test_icdf_gridded", "statsmodels/nonparametric/tests/test_kde.py::TestKDEGauss::test_sf_gridded", "statsmodels/nonparametric/tests/test_kde.py::TestKDEGauss::test_cdf_gridded", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWBiw::test_kernel_constants", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWBiw::test_compare", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWBiw::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWTri::test_kernel_constants", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWTri::test_compare", "statsmodels/nonparametric/tests/test_kde.py::TestKDEWTri::test_evaluate", "statsmodels/nonparametric/tests/test_kde.py::test_fit_self", "statsmodels/nonparametric/tests/test_kde.py::test_kde_bw_positive" ]
[ "statsmodels/nonparametric/tests/test_kde.py::TestKDECustomBandwidth::test_check_is_fit_ok_with_standard_custom_bandwidth", "statsmodels/nonparametric/tests/test_kde.py::TestKDECustomBandwidth::test_check_is_fit_ok_with_custom_bandwidth", "statsmodels/nonparametric/tests/test_kde.py::TestKDECustomBandwidth::test_check_is_fit_ok_with_float_bandwidth[False]", "statsmodels/nonparametric/tests/test_kde.py::TestKDECustomBandwidth::test_check_is_fit_ok_with_float_bandwidth[True]" ]
Python
[]
[]
statsmodels/statsmodels
7,017
statsmodels__statsmodels-7017
[ "7009" ]
b37e7d9931a6fa93142f6e6e3a281b0d3ca46abe
diff --git a/statsmodels/tsa/statespace/varmax.py b/statsmodels/tsa/statespace/varmax.py --- a/statsmodels/tsa/statespace/varmax.py +++ b/statsmodels/tsa/statespace/varmax.py @@ -454,16 +454,14 @@ def param_names(self): # 1. Intercept terms if self.k_trend > 0: - for i in self.polynomial_trend.nonzero()[0]: - if i == 0: - param_names += ['intercept.%s' % endog_names[j] - for j in range(self.k_endog)] - elif i == 1: - param_names += ['drift.%s' % endog_names[j] - for j in range(self.k_endog)] - else: - param_names += ['trend.%d.%s' % (i, endog_names[j]) - for j in range(self.k_endog)] + for j in range(self.k_endog): + for i in self.polynomial_trend.nonzero()[0]: + if i == 0: + param_names += ['intercept.%s' % endog_names[j]] + elif i == 1: + param_names += ['drift.%s' % endog_names[j]] + else: + param_names += ['trend.%d.%s' % (i, endog_names[j])] # 2. AR terms param_names += [ @@ -724,7 +722,7 @@ def update(self, params, transformed=True, includes_fixed=False, # just += later if not self.mle_regression: zero = np.array(0, dtype=params.dtype) - self.ssm[self._idx_state_intercept] = zero + self.ssm['state_intercept', :] = zero trend_params = params[self._params_trend].reshape( self.k_endog, self.k_trend).T
diff --git a/statsmodels/tsa/statespace/tests/test_varmax.py b/statsmodels/tsa/statespace/tests/test_varmax.py --- a/statsmodels/tsa/statespace/tests/test_varmax.py +++ b/statsmodels/tsa/statespace/tests/test_varmax.py @@ -1223,3 +1223,60 @@ def test_vma1_exog(): # Have to ignore first 2 observations due to differences in initialization assert_allclose(res_mva.llf_obs[2:], (res_ma1.llf_obs + res_ma2.llf_obs)[2:]) + + +def test_param_names_trend(): + endog = np.zeros((3, 2)) + base_names = ['L1.y1.y1', 'L1.y2.y1', 'L1.y1.y2', 'L1.y2.y2', + 'sqrt.var.y1', 'sqrt.cov.y1.y2', 'sqrt.var.y2'] + base_params = [0.5, 0, 0, 0.4, 1.0, 0.0, 1.0] + + # No trend + mod = varmax.VARMAX(endog, order=(1, 0), trend='n') + desired = base_names + assert_equal(mod.param_names, desired) + + # Intercept + mod = varmax.VARMAX(endog, order=(1, 0), trend=[1]) + desired = ['intercept.y1', 'intercept.y2'] + base_names + assert_equal(mod.param_names, desired) + mod.update([1.2, -0.5] + base_params) + assert_allclose(mod['state_intercept'], [1.2, -0.5]) + + # Intercept + drift + mod = varmax.VARMAX(endog, order=(1, 0), trend=[1, 1]) + desired = (['intercept.y1', 'drift.y1', + 'intercept.y2', 'drift.y2'] + base_names) + assert_equal(mod.param_names, desired) + mod.update([1.2, 0, -0.5, 0] + base_params) + assert_allclose(mod['state_intercept', 0], 1.2) + assert_allclose(mod['state_intercept', 1], -0.5) + mod.update([0, 1, 0, 1.1] + base_params) + assert_allclose(mod['state_intercept', 0], np.arange(2, 5)) + assert_allclose(mod['state_intercept', 1], 1.1 * np.arange(2, 5)) + mod.update([1.2, 1, -0.5, 1.1] + base_params) + assert_allclose(mod['state_intercept', 0], 1.2 + np.arange(2, 5)) + assert_allclose(mod['state_intercept', 1], -0.5 + 1.1 * np.arange(2, 5)) + + # Drift only + mod = varmax.VARMAX(endog, order=(1, 0), trend=[0, 1]) + desired = ['drift.y1', 'drift.y2'] + base_names + assert_equal(mod.param_names, desired) + mod.update([1, 1.1] + base_params) + assert_allclose(mod['state_intercept', 
0], np.arange(2, 5)) + assert_allclose(mod['state_intercept', 1], 1.1 * np.arange(2, 5)) + + # Intercept + third order + mod = varmax.VARMAX(endog, order=(1, 0), trend=[1, 0, 1]) + desired = (['intercept.y1', 'trend.2.y1', + 'intercept.y2', 'trend.2.y2'] + base_names) + assert_equal(mod.param_names, desired) + mod.update([1.2, 0, -0.5, 0] + base_params) + assert_allclose(mod['state_intercept', 0], 1.2) + assert_allclose(mod['state_intercept', 1], -0.5) + mod.update([0, 1, 0, 1.1] + base_params) + assert_allclose(mod['state_intercept', 0], np.arange(2, 5)**2) + assert_allclose(mod['state_intercept', 1], 1.1 * np.arange(2, 5)**2) + mod.update([1.2, 1, -0.5, 1.1] + base_params) + assert_allclose(mod['state_intercept', 0], 1.2 + np.arange(2, 5)**2) + assert_allclose(mod['state_intercept', 1], -0.5 + 1.1 * np.arange(2, 5)**2)
BUG: User-defined parameter ordering in simulation #### Description It seems the element ordering of `params` in VARMAX.simulate and `mod.param_names` is inconsistent. This results in unexpected simulation behavior with user-defined, fixed parameters. See also [this question](https://stackoverflow.com/questions/63631670/statsmodels-user-defined-parameter-ordering-in-simulate/) in StackOverflow. #### Steps to reproduce ```python import statsmodels as statsmodels import statsmodels.api as sm import pandas as pd import numpy as np endog = pd.DataFrame({"y":[np.nan, np.nan], "y2":[np.nan, np.nan]}, index=[0, 1]) statsmodels.__version__ >>>>>>>>> '0.12.0' ``` *Example 1*: Two-variable VAR(1) with intercepts and drifts, all parameters fixed to 0 except for intercept of ```y``` set to 1. Yields expected results. ```python params = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] mod1 = sm.tsa.VARMAX(endog, order=(1, 0), trend=[1, 1], trend_offset=0) print('\n'.join('{} :{}'.format(*k) for k in zip(mod1.param_names, params))) print(mod1.simulate(params=params, nsimulations=5, initial_state=[0, 0])) >>>>>>>>> intercept.y :1 intercept.y2 :0 drift.y :0 drift.y2 :0 L1.y.y :0 L1.y2.y :0 L1.y.y2 :0 L1.y2.y2 :0 sqrt.var.y :0 sqrt.cov.y.y2 :0 sqrt.var.y2 :0 y y2 0 0.0 0.0 1 1.0 0.0 2 1.0 0.0 3 1.0 0.0 4 1.0 0.0 ``` *Example 2*: Same as in Example 1 but now intercept of variable ```y``` is set to 0 and intercept of variables ```y2``` is set to 1. Rather than yielding the expected result it seems that the second element in ```params``` actually controls the drift parameter of ```y``` rather than the intercept term of ```y2```. This contradicts the parameter ordering given by ```mod2.param_names```. 
```python params = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] mod2 = sm.tsa.VARMAX(endog, order=(1, 0), trend=[1, 1], trend_offset=0) print('\n'.join('{} :{}'.format(*k) for k in zip(mod2.param_names, params))) print(mod2.simulate(params=params, nsimulations=5, initial_state=[0, 0])) >>>>>>>>> intercept.y :0 intercept.y2 :1 drift.y :0 drift.y2 :0 L1.y.y :0 L1.y2.y :0 L1.y.y2 :0 L1.y2.y2 :0 sqrt.var.y :0 sqrt.cov.y.y2 :0 sqrt.var.y2 :0 y y2 0 0.0 0.0 1 1.0 0.0 2 2.0 0.0 3 3.0 0.0 4 4.0 0.0 ```
Thanks very much for this report. This is definitely a bug.
"2020-09-02T03:31:49Z"
0.12
[ "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVARMA::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_diagonal::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_predict", 
"statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVMA1::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR2::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_results", 
"statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_forecast", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog2::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[True-c]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_forecast_exog", "statsmodels/tsa/statespace/tests/test_varmax.py::test_vma1_exog", "statsmodels/tsa/statespace/tests/test_varmax.py::test_append_results", "statsmodels/tsa/statespace/tests/test_varmax.py::test_specifications", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[False-c]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[True-ct]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_misspecifications", 
"statsmodels/tsa/statespace/tests/test_varmax.py::test_misc_exog", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results_exog", "statsmodels/tsa/statespace/tests/test_varmax.py::test_apply_results", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[False-ct]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[False-n]", "statsmodels/tsa/statespace/tests/test_varmax.py::test_predict_custom_index", "statsmodels/tsa/statespace/tests/test_varmax.py::test_recreate_model", "statsmodels/tsa/statespace/tests/test_varmax.py::test_extend_results[True-n]", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_obs_intercept::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_bse_oim", 
"statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_representation", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_standardized_forecasts_error", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_measurement_error::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_forecast", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_results", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_bic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_summary", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_bse_approx", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_loglike", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_dynamic_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_mle", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_aic", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_predict", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_params", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_bse_oim", "statsmodels/tsa/statespace/tests/test_varmax.py::TestVAR_exog::test_standardized_forecasts_error" ]
[ "statsmodels/tsa/statespace/tests/test_varmax.py::test_param_names_trend" ]
Python
[]
[]
statsmodels/statsmodels
7,025
statsmodels__statsmodels-7025
[ "7014" ]
04d66900191eab72a479753d1114c044c3520252
diff --git a/statsmodels/tsa/stattools.py b/statsmodels/tsa/stattools.py --- a/statsmodels/tsa/stattools.py +++ b/statsmodels/tsa/stattools.py @@ -1,13 +1,13 @@ """ Statistical tools for time series analysis """ -import warnings - from statsmodels.compat.numpy import lstsq from statsmodels.compat.pandas import deprecate_kwarg from statsmodels.compat.python import lrange, lzip from statsmodels.compat.scipy import _next_regular +import warnings + import numpy as np from numpy.linalg import LinAlgError import pandas as pd @@ -130,14 +130,17 @@ def _autolag( elif method == "t-stat": # stop = stats.norm.ppf(.95) stop = 1.6448536269514722 + # Default values to ensure that always set + bestlag = startlag + maxlag + icbest = 0.0 for lag in range(startlag + maxlag, startlag - 1, -1): icbest = np.abs(results[lag].tvalues[-1]) + bestlag = lag if np.abs(icbest) >= stop: - bestlag = lag - icbest = icbest + # Break for first lag with a significant t-stat break else: - raise ValueError("Information Criterion %s not understood.") % method + raise ValueError(f"Information Criterion {method} not understood.") if not regresults: return icbest, bestlag @@ -976,18 +979,21 @@ def pacf(x, nlags=None, method="ywadjusted", alpha=None): consistently worse than the other options. """ nlags = int_like(nlags, "nlags", optional=True) - renames = {"ydu":"yda", - "ywu": "ywa", - "ywunbiased":"ywadjusted", - "ldunbiased":"ldadjusted", - "ld_unbiased":"ld_adjusted", - "ldu":"lda", - "ols-unbiased":"ols-adjusted"} + renames = { + "ydu": "yda", + "ywu": "ywa", + "ywunbiased": "ywadjusted", + "ldunbiased": "ldadjusted", + "ld_unbiased": "ld_adjusted", + "ldu": "lda", + "ols-unbiased": "ols-adjusted", + } if method in renames: warnings.warn( f"{method} has been renamed {renames[method]}. 
After release 0.13, " "using the old name will raise.", - FutureWarning) + FutureWarning, + ) method = renames[method] methods = ( "ols", @@ -1347,7 +1353,8 @@ def grangercausalitytests(x, maxlag, addconst=True, verbose=True): if lags.min() <= 0 or lags.size == 0: raise ValueError( "maxlag must be a non-empty list containing only " - "positive integers") + "positive integers" + ) if x.shape[0] <= 3 * maxlag + int(addconst): raise ValueError( @@ -1871,9 +1878,13 @@ def kpss(x, regression="c", nlags=None, store=False): look-up table. The actual p-value is {direction} than the p-value returned. """ if p_value == pvals[-1]: - warnings.warn(warn_msg.format(direction="smaller"), InterpolationWarning) + warnings.warn( + warn_msg.format(direction="smaller"), InterpolationWarning + ) elif p_value == pvals[0]: - warnings.warn(warn_msg.format(direction="greater"), InterpolationWarning) + warnings.warn( + warn_msg.format(direction="greater"), InterpolationWarning + ) crit_dict = {"10%": crit[0], "5%": crit[1], "2.5%": crit[2], "1%": crit[3]}
diff --git a/statsmodels/tsa/tests/test_stattools.py b/statsmodels/tsa/tests/test_stattools.py --- a/statsmodels/tsa/tests/test_stattools.py +++ b/statsmodels/tsa/tests/test_stattools.py @@ -1,30 +1,49 @@ +from statsmodels.compat.numpy import lstsq +from statsmodels.compat.pandas import assert_index_equal +from statsmodels.compat.platform import PLATFORM_WIN +from statsmodels.compat.python import lrange + import os import warnings import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_equal, + assert_raises, +) import pandas as pd +from pandas import DataFrame, Series, date_range import pytest -from numpy.testing import (assert_almost_equal, assert_equal, assert_raises, - assert_, assert_allclose) -from pandas import Series, date_range, DataFrame -from statsmodels.compat.numpy import lstsq -from statsmodels.compat.pandas import assert_index_equal -from statsmodels.compat.platform import PLATFORM_WIN -from statsmodels.compat.python import lrange -from statsmodels.datasets import macrodata, sunspots, nile, randhie, modechoice -from statsmodels.tools.sm_exceptions import (CollinearityWarning, - MissingDataError, - InterpolationWarning) +from statsmodels.datasets import macrodata, modechoice, nile, randhie, sunspots +from statsmodels.tools.sm_exceptions import ( + CollinearityWarning, + InterpolationWarning, + MissingDataError, +) from statsmodels.tsa.arima_process import arma_acovf from statsmodels.tsa.statespace.sarimax import SARIMAX -from statsmodels.tsa.stattools import (adfuller, acf, pacf_yw, pacf_ols, - pacf, grangercausalitytests, - coint, acovf, kpss, - arma_order_select_ic, levinson_durbin, - levinson_durbin_pacf, pacf_burg, - innovations_algo, innovations_filter, - zivot_andrews) +from statsmodels.tsa.stattools import ( + acf, + acovf, + adfuller, + arma_order_select_ic, + coint, + grangercausalitytests, + innovations_algo, + innovations_filter, + kpss, + levinson_durbin, + levinson_durbin_pacf, + pacf, 
+ pacf_burg, + pacf_ols, + pacf_yw, + zivot_andrews, +) DECIMAL_8 = 8 DECIMAL_6 = 6 @@ -37,7 +56,7 @@ CURR_DIR = os.path.dirname(os.path.abspath(__file__)) [email protected](scope='module') [email protected](scope="module") def acovf_data(): rnd = np.random.RandomState(12345) return rnd.randn(250) @@ -49,10 +68,11 @@ class CheckADF(object): Test values taken from Stata. """ - levels = ['1%', '5%', '10%'] + + levels = ["1%", "5%", "10%"] data = macrodata.load_pandas() - x = data.data['realgdp'].values - y = data.data['infl'].values + x = data.data["realgdp"].values + y = data.data["infl"].values def test_teststat(self): assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5) @@ -72,23 +92,20 @@ class TestADFConstant(CheckADF): @classmethod def setup_class(cls): - cls.res1 = adfuller(cls.x, regression="c", autolag=None, - maxlag=4) - cls.teststat = .97505319 - cls.pvalue = .99399563 + cls.res1 = adfuller(cls.x, regression="c", autolag=None, maxlag=4) + cls.teststat = 0.97505319 + cls.pvalue = 0.99399563 cls.critvalues = [-3.476, -2.883, -2.573] class TestADFConstantTrend(CheckADF): - """ - """ + """""" @classmethod def setup_class(cls): - cls.res1 = adfuller(cls.x, regression="ct", autolag=None, - maxlag=4) + cls.res1 = adfuller(cls.x, regression="ct", autolag=None, maxlag=4) cls.teststat = -1.8566374 - cls.pvalue = .67682968 + cls.pvalue = 0.67682968 cls.critvalues = [-4.007, -3.437, -3.137] @@ -101,16 +118,14 @@ def setup_class(cls): class TestADFNoConstant(CheckADF): - """ - """ + """""" @classmethod def setup_class(cls): - cls.res1 = adfuller(cls.x, regression="nc", autolag=None, - maxlag=4) + cls.res1 = adfuller(cls.x, regression="nc", autolag=None, maxlag=4) cls.teststat = 3.5227498 - cls.pvalue = .99999 + cls.pvalue = 0.99999 # Stata does not return a p-value for noconstant. 
# Tau^max in MacKinnon (1994) is missing, so it is # assumed that its right-tail is well-behaved @@ -120,51 +135,53 @@ def setup_class(cls): # No Unit Root + class TestADFConstant2(CheckADF): @classmethod def setup_class(cls): - cls.res1 = adfuller(cls.y, regression="c", autolag=None, - maxlag=1) + cls.res1 = adfuller(cls.y, regression="c", autolag=None, maxlag=1) cls.teststat = -4.3346988 - cls.pvalue = .00038661 + cls.pvalue = 0.00038661 cls.critvalues = [-3.476, -2.883, -2.573] class TestADFConstantTrend2(CheckADF): @classmethod def setup_class(cls): - cls.res1 = adfuller(cls.y, regression="ct", autolag=None, - maxlag=1) + cls.res1 = adfuller(cls.y, regression="ct", autolag=None, maxlag=1) cls.teststat = -4.425093 - cls.pvalue = .00199633 + cls.pvalue = 0.00199633 cls.critvalues = [-4.006, -3.437, -3.137] class TestADFNoConstant2(CheckADF): @classmethod def setup_class(cls): - cls.res1 = adfuller(cls.y, regression="nc", autolag=None, - maxlag=1) + cls.res1 = adfuller(cls.y, regression="nc", autolag=None, maxlag=1) cls.teststat = -2.4511596 cls.pvalue = 0.013747 # Stata does not return a p-value for noconstant # this value is just taken from our results cls.critvalues = [-2.587, -1.950, -1.617] - _, _1, _2, cls.store = adfuller(cls.y, regression="nc", autolag=None, - maxlag=1, store=True) + _, _1, _2, cls.store = adfuller( + cls.y, regression="nc", autolag=None, maxlag=1, store=True + ) def test_store_str(self): - assert_equal(self.store.__str__(), 'Augmented Dickey-Fuller Test Results') + assert_equal( + self.store.__str__(), "Augmented Dickey-Fuller Test Results" + ) class CheckCorrGram(object): """ Set up for ACF, PACF tests. 
""" + data = macrodata.load_pandas() - x = data.data['realgdp'] - filename = os.path.join(CURR_DIR, 'results', 'results_corrgram.csv') - results = pd.read_csv(filename, delimiter=',') + x = data.data["realgdp"] + filename = os.path.join(CURR_DIR, "results", "results_corrgram.csv") + results = pd.read_csv(filename, delimiter=",") class TestACF(CheckCorrGram): @@ -174,11 +191,11 @@ class TestACF(CheckCorrGram): @classmethod def setup_class(cls): - cls.acf = cls.results['acvar'] + cls.acf = cls.results["acvar"] # cls.acf = np.concatenate(([1.], cls.acf)) - cls.qstat = cls.results['Q1'] - cls.res1 = acf(cls.x, nlags=40, qstat=True, alpha=.05, fft=False) - cls.confint_res = cls.results[['acvar_lb', 'acvar_ub']].values + cls.qstat = cls.results["Q1"] + cls.res1 = acf(cls.x, nlags=40, qstat=True, alpha=0.05, fft=False) + cls.confint_res = cls.results[["acvar_lb", "acvar_ub"]].values def test_acf(self): assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8) @@ -201,8 +218,8 @@ class TestACF_FFT(CheckCorrGram): # Test Autocorrelation Function using FFT @classmethod def setup_class(cls): - cls.acf = cls.results['acvarfft'] - cls.qstat = cls.results['Q1'] + cls.acf = cls.results["acvarfft"] + cls.qstat = cls.results["Q1"] cls.res1 = acf(cls.x, nlags=40, qstat=True, fft=True) def test_acf(self): @@ -218,21 +235,35 @@ class TestACFMissing(CheckCorrGram): @classmethod def setup_class(cls): cls.x = np.concatenate((np.array([np.nan]), cls.x)) - cls.acf = cls.results['acvar'] # drop and conservative - cls.qstat = cls.results['Q1'] - cls.res_drop = acf(cls.x, nlags=40, qstat=True, alpha=.05, - missing='drop', fft=False) - cls.res_conservative = acf(cls.x, nlags=40, qstat=True, alpha=.05, - fft=False, missing='conservative') + cls.acf = cls.results["acvar"] # drop and conservative + cls.qstat = cls.results["Q1"] + cls.res_drop = acf( + cls.x, nlags=40, qstat=True, alpha=0.05, missing="drop", fft=False + ) + cls.res_conservative = acf( + cls.x, + nlags=40, + qstat=True, + 
alpha=0.05, + fft=False, + missing="conservative", + ) cls.acf_none = np.empty(40) * np.nan # lags 1 to 40 inclusive cls.qstat_none = np.empty(40) * np.nan - cls.res_none = acf(cls.x, nlags=40, qstat=True, alpha=.05, - missing='none', fft=False) + cls.res_none = acf( + cls.x, nlags=40, qstat=True, alpha=0.05, missing="none", fft=False + ) def test_raise(self): with pytest.raises(MissingDataError): - acf(self.x, nlags=40, qstat=True, fft=False, alpha=.05, - missing='raise') + acf( + self.x, + nlags=40, + qstat=True, + fft=False, + alpha=0.05, + missing="raise", + ) def test_acf_none(self): assert_almost_equal(self.res_none[0][1:41], self.acf_none, DECIMAL_8) @@ -241,8 +272,9 @@ def test_acf_drop(self): assert_almost_equal(self.res_drop[0][1:41], self.acf, DECIMAL_8) def test_acf_conservative(self): - assert_almost_equal(self.res_conservative[0][1:41], self.acf, - DECIMAL_8) + assert_almost_equal( + self.res_conservative[0][1:41], self.acf, DECIMAL_8 + ) def test_qstat_none(self): # todo why is res1/qstat 1 short @@ -259,18 +291,18 @@ def test_qstat_none(self): class TestPACF(CheckCorrGram): @classmethod def setup_class(cls): - cls.pacfols = cls.results['PACOLS'] - cls.pacfyw = cls.results['PACYW'] + cls.pacfols = cls.results["PACOLS"] + cls.pacfyw = cls.results["PACYW"] def test_ols(self): - pacfols, confint = pacf(self.x, nlags=40, alpha=.05, method="ols") + pacfols, confint = pacf(self.x, nlags=40, alpha=0.05, method="ols") assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6) centered = confint - confint.mean(1)[:, None] # from edited Stata ado file - res = [[-.1375625, .1375625]] * 40 + res = [[-0.1375625, 0.1375625]] * 40 assert_almost_equal(centered[1:41], res, DECIMAL_6) # check lag 0 - assert_equal(centered[0], [0., 0.]) + assert_equal(centered[0], [0.0, 0.0]) assert_equal(confint[0], [1, 1]) assert_equal(pacfols[0], 1) @@ -285,8 +317,8 @@ def test_ols_inefficient(self): direct = np.empty(lag_len + 1) direct[0] = 1.0 for i in range(lag_len): - lags[:, i] 
= x[5 - (i + 1):-(i + 1)] - direct[i + 1] = lstsq(lags[:, :(i + 1)], lead, rcond=None)[0][-1] + lags[:, i] = x[5 - (i + 1) : -(i + 1)] + direct[i + 1] = lstsq(lags[:, : (i + 1)], lead, rcond=None)[0][-1] assert_allclose(pacfols, direct, atol=1e-8) def test_yw(self): @@ -309,10 +341,11 @@ class CheckCoint(object): Test values taken from Stata """ - levels = ['1%', '5%', '10%'] + + levels = ["1%", "5%", "10%"] data = macrodata.load_pandas() - y1 = data.data['realcons'].values - y2 = data.data['realgdp'].values + y1 = data.data["realcons"].values + y2 = data.data["realgdp"].values def test_tstat(self): assert_almost_equal(self.coint_t, self.teststat, DECIMAL_4) @@ -327,7 +360,9 @@ class TestCoint_t(CheckCoint): @classmethod def setup_class(cls): # cls.coint_t = coint(cls.y1, cls.y2, trend="c")[0] - cls.coint_t = coint(cls.y1, cls.y2, trend="c", maxlag=0, autolag=None)[0] + cls.coint_t = coint(cls.y1, cls.y2, trend="c", maxlag=0, autolag=None)[ + 0 + ] cls.teststat = -1.8208817 cls.teststat = -1.830170986148 @@ -345,7 +380,7 @@ def test_coint(): # FIXME: enable/xfail/skip or delete for trend in []: # ['c', 'ct', 'ctt', 'nc']: - print('\n', trend) + print("\n", trend) print(coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None)) print(coint(y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None)) print(coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None)) @@ -354,44 +389,90 @@ def test_coint(): # results from Stata egranger res_egranger = {} # trend = 'ct' - res = res_egranger['ct'] = {} - res[0] = [-5.615251442239, -4.406102369132, -3.82866685109, -3.532082997903] - res[1] = [-5.63591313706, -4.758609717199, -4.179130554708, -3.880909696863] - res[2] = [-2.892029275027, -4.758609717199, -4.179130554708, -3.880909696863] + res = res_egranger["ct"] = {} + res[0] = [ + -5.615251442239, + -4.406102369132, + -3.82866685109, + -3.532082997903, + ] + res[1] = [ + -5.63591313706, + -4.758609717199, + -4.179130554708, + -3.880909696863, + ] + res[2] = [ + 
-2.892029275027, + -4.758609717199, + -4.179130554708, + -3.880909696863, + ] res[3] = [-5.626932544079, -5.08363327039, -4.502469783057, -4.2031051091] # trend = 'c' - res = res_egranger['c'] = {} + res = res_egranger["c"] = {} # first critical value res[0][1] has a discrepancy starting at 4th decimal - res[0] = [-5.760696844656, -3.952043522638, -3.367006313729, -3.065831247948] + res[0] = [ + -5.760696844656, + -3.952043522638, + -3.367006313729, + -3.065831247948, + ] # manually adjusted to have higher precision as in other cases res[0][1] = -3.952321293401682 - res[1] = [-5.781087068772, -4.367111915942, -3.783961136005, -3.483501524709] - res[2] = [-2.477444137366, -4.367111915942, -3.783961136005, -3.483501524709] - res[3] = [-5.778205811661, -4.735249216434, -4.152738973763, -3.852480848968] + res[1] = [ + -5.781087068772, + -4.367111915942, + -3.783961136005, + -3.483501524709, + ] + res[2] = [ + -2.477444137366, + -4.367111915942, + -3.783961136005, + -3.483501524709, + ] + res[3] = [ + -5.778205811661, + -4.735249216434, + -4.152738973763, + -3.852480848968, + ] # trend = 'ctt' - res = res_egranger['ctt'] = {} - res[0] = [-5.644431269946, -4.796038299708, -4.221469431008, -3.926472577178] + res = res_egranger["ctt"] = {} + res[0] = [ + -5.644431269946, + -4.796038299708, + -4.221469431008, + -3.926472577178, + ] res[1] = [-5.665691609506, -5.111158174219, -4.53317278104, -4.23601008516] res[2] = [-3.161462374828, -5.111158174219, -4.53317278104, -4.23601008516] - res[3] = [-5.657904558563, -5.406880189412, -4.826111619543, -4.527090164875] + res[3] = [ + -5.657904558563, + -5.406880189412, + -4.826111619543, + -4.527090164875, + ] # The following for 'nc' are only regression test numbers # trend = 'nc' not allowed in egranger # trend = 'nc' - res = res_egranger['nc'] = {} + res = res_egranger["nc"] = {} nan = np.nan # shortcut for table res[0] = [-3.7146175989071137, nan, nan, nan] res[1] = [-3.8199323012888384, nan, nan, nan] res[2] = 
[-1.6865000791270679, nan, nan, nan] res[3] = [-3.7991270451873675, nan, nan, nan] - for trend in ['c', 'ct', 'ctt', 'nc']: + for trend in ["c", "ct", "ctt", "nc"]: res1 = {} res1[0] = coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None) - res1[1] = coint(y[:, 0], y[:, 1:3], trend=trend, maxlag=4, - autolag=None) + res1[1] = coint( + y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None + ) res1[2] = coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None) res1[3] = coint(y[:, 0], y[:, 1:], trend=trend, maxlag=4, autolag=None) @@ -404,11 +485,15 @@ def test_coint(): assert_allclose(r1, r2, rtol=0, atol=6e-7) # use default autolag #4490 - res1_0 = coint(y[:, 0], y[:, 1], trend='ct', maxlag=4) - assert_allclose(res1_0[2], res_egranger['ct'][0][1:], rtol=0, atol=6e-7) + res1_0 = coint(y[:, 0], y[:, 1], trend="ct", maxlag=4) + assert_allclose(res1_0[2], res_egranger["ct"][0][1:], rtol=0, atol=6e-7) # the following is just a regression test - assert_allclose(res1_0[:2], [-13.992946638547112, 2.270898990540678e-27], - rtol=1e-10, atol=1e-27) + assert_allclose( + res1_0[:2], + [-13.992946638547112, 2.270898990540678e-27], + rtol=1e-10, + atol=1e-27, + ) def test_coint_identical_series(): @@ -416,7 +501,7 @@ def test_coint_identical_series(): scale_e = 1 np.random.seed(123) y = scale_e * np.random.randn(nobs) - warnings.simplefilter('always', CollinearityWarning) + warnings.simplefilter("always", CollinearityWarning) with pytest.warns(CollinearityWarning): c = coint(y, y, trend="c", maxlag=0, autolag=None) assert_equal(c[1], 0.0) @@ -430,7 +515,7 @@ def test_coint_perfect_collinearity(): np.random.seed(123) x = scale_e * np.random.randn(nobs, 2) y = 1 + x.sum(axis=1) + 1e-7 * np.random.randn(nobs) - warnings.simplefilter('always', CollinearityWarning) + warnings.simplefilter("always", CollinearityWarning) with warnings.catch_warnings(record=True) as w: c = coint(y, x, trend="c", maxlag=0, autolag=None) assert_equal(c[1], 0.0) @@ -438,34 +523,36 @@ def 
test_coint_perfect_collinearity(): class TestGrangerCausality(object): - def test_grangercausality(self): # some example data mdata = macrodata.load_pandas().data - mdata = mdata[['realgdp', 'realcons']].values + mdata = mdata[["realgdp", "realcons"]].values data = mdata.astype(float) data = np.diff(np.log(data), axis=0) # R: lmtest:grangertest r_result = [0.243097, 0.7844328, 195, 2] # f_test gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False) - assert_almost_equal(r_result, gr[2][0]['ssr_ftest'], decimal=7) - assert_almost_equal(gr[2][0]['params_ftest'], gr[2][0]['ssr_ftest'], - decimal=7) + assert_almost_equal(r_result, gr[2][0]["ssr_ftest"], decimal=7) + assert_almost_equal( + gr[2][0]["params_ftest"], gr[2][0]["ssr_ftest"], decimal=7 + ) def test_grangercausality_single(self): mdata = macrodata.load_pandas().data - mdata = mdata[['realgdp', 'realcons']].values + mdata = mdata[["realgdp", "realcons"]].values data = mdata.astype(float) data = np.diff(np.log(data), axis=0) gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False) gr2 = grangercausalitytests(data[:, 1::-1], [2], verbose=False) assert 1 in gr assert 1 not in gr2 - assert_almost_equal(gr[2][0]['ssr_ftest'], gr2[2][0]['ssr_ftest'], - decimal=7) - assert_almost_equal(gr[2][0]['params_ftest'], gr2[2][0]['ssr_ftest'], - decimal=7) + assert_almost_equal( + gr[2][0]["ssr_ftest"], gr2[2][0]["ssr_ftest"], decimal=7 + ) + assert_almost_equal( + gr[2][0]["params_ftest"], gr2[2][0]["ssr_ftest"], decimal=7 + ) def test_granger_fails_on_nobs_check(self, reset_randomstate): # Test that if maxlag is too large, Granger Test raises a clear error. 
@@ -484,8 +571,9 @@ def test_granger_fails_on_finite_check(self, reset_randomstate): def test_granger_fails_on_zero_lag(self, reset_randomstate): x = np.random.rand(1000, 2) with pytest.raises( - ValueError, - match="maxlag must be a non-empty list containing only positive integers"): + ValueError, + match="maxlag must be a non-empty list containing only positive integers", + ): grangercausalitytests(x, [0, 1, 2]) @@ -500,15 +588,16 @@ class TestKPSS: In this context, x is the vector containing the macrodata['realgdp'] series. """ + @classmethod def setup(cls): cls.data = macrodata.load_pandas() - cls.x = cls.data.data['realgdp'].values + cls.x = cls.data.data["realgdp"].values def test_fail_nonvector_input(self, reset_randomstate): # should be fine with pytest.warns(InterpolationWarning): - kpss(self.x, nlags='legacy') + kpss(self.x, nlags="legacy") x = np.random.rand(20, 2) assert_raises(ValueError, kpss, x) @@ -516,38 +605,39 @@ def test_fail_nonvector_input(self, reset_randomstate): def test_fail_unclear_hypothesis(self): # these should be fine, with pytest.warns(InterpolationWarning): - kpss(self.x, 'c', nlags='legacy') + kpss(self.x, "c", nlags="legacy") with pytest.warns(InterpolationWarning): - kpss(self.x, 'C', nlags='legacy') + kpss(self.x, "C", nlags="legacy") with pytest.warns(InterpolationWarning): - kpss(self.x, 'ct', nlags='legacy') + kpss(self.x, "ct", nlags="legacy") with pytest.warns(InterpolationWarning): - kpss(self.x, 'CT', nlags='legacy') + kpss(self.x, "CT", nlags="legacy") - assert_raises(ValueError, kpss, self.x, "unclear hypothesis", - nlags='legacy') + assert_raises( + ValueError, kpss, self.x, "unclear hypothesis", nlags="legacy" + ) def test_teststat(self): with pytest.warns(InterpolationWarning): - kpss_stat, pval, lags, crits = kpss(self.x, 'c', 3) + kpss_stat, _, _, _ = kpss(self.x, "c", 3) assert_almost_equal(kpss_stat, 5.0169, DECIMAL_3) with pytest.warns(InterpolationWarning): - kpss_stat, pval, lags, crits = kpss(self.x, 'ct', 3) 
+ kpss_stat, _, _, _ = kpss(self.x, "ct", 3) assert_almost_equal(kpss_stat, 1.1828, DECIMAL_3) def test_pval(self): with pytest.warns(InterpolationWarning): - kpss_stat, pval, lags, crits = kpss(self.x, 'c', 3) + _, pval, _, _ = kpss(self.x, "c", 3) assert_equal(pval, 0.01) with pytest.warns(InterpolationWarning): - kpss_stat, pval, lags, crits = kpss(self.x, 'ct', 3) + _, pval, _, _ = kpss(self.x, "ct", 3) assert_equal(pval, 0.01) def test_store(self): with pytest.warns(InterpolationWarning): - kpss_stat, pval, crit, store = kpss(self.x, 'c', 3, True) + _, _, _, store = kpss(self.x, "c", 3, True) # assert attributes, and make sure they're correct assert_equal(store.nobs, len(self.x)) @@ -557,22 +647,22 @@ def test_store(self): def test_lags(self): # real GDP from macrodata data set with pytest.warns(InterpolationWarning): - res = kpss(self.x, 'c', nlags='auto') + res = kpss(self.x, "c", nlags="auto") assert_equal(res[2], 9) # real interest rates from macrodata data set - res = kpss(sunspots.load(True).data['SUNACTIVITY'], 'c', nlags='auto') + res = kpss(sunspots.load(True).data["SUNACTIVITY"], "c", nlags="auto") assert_equal(res[2], 7) # volumes from nile data set with pytest.warns(InterpolationWarning): - res = kpss(nile.load(True).data['volume'], 'c', nlags='auto') + res = kpss(nile.load(True).data["volume"], "c", nlags="auto") assert_equal(res[2], 5) # log-coinsurance from randhie data set with pytest.warns(InterpolationWarning): - res = kpss(randhie.load(True).data['lncoins'], 'ct', nlags='auto') + res = kpss(randhie.load(True).data["lncoins"], "ct", nlags="auto") assert_equal(res[2], 75) # in-vehicle time from modechoice data set with pytest.warns(InterpolationWarning): - res = kpss(modechoice.load(True).data['invt'], 'ct', nlags='auto') + res = kpss(modechoice.load(True).data["invt"], "ct", nlags="auto") assert_equal(res[2], 18) def test_kpss_fails_on_nobs_check(self): @@ -580,47 +670,34 @@ def test_kpss_fails_on_nobs_check(self): # clear error # GH5925 nobs 
= len(self.x) - msg = (r"lags \({}\) must be < number of observations \({}\)" - .format(nobs, nobs)) + msg = r"lags \({}\) must be < number of observations \({}\)".format( + nobs, nobs + ) with pytest.raises(ValueError, match=msg): - kpss(self.x, 'c', nlags=nobs) + kpss(self.x, "c", nlags=nobs) def test_kpss_autolags_does_not_assign_lags_equal_to_nobs(self): # Test that if *autolags* exceeds number of observations, we set # suitable lags # GH5925 - data_which_breaks_autolag = np.array( - [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, - 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, - 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, - 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, - 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, - 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, - 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, - 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, - 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, - 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, - 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, - 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, - 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, - 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0]).astype(float) - + base = np.array([0, 0, 0, 0, 0, 1, 1.0]) + data_which_breaks_autolag = np.r_[np.tile(base, 297 // 7), [0, 0, 0]] kpss(data_which_breaks_autolag, nlags="auto") def test_legacy_lags(self): # Test legacy lags are the same with pytest.warns(InterpolationWarning): - res = kpss(self.x, 'c', nlags='legacy') + res = kpss(self.x, "c", nlags="legacy") assert_equal(res[2], 15) def test_unknown_lags(self): # Test legacy lags are the same with pytest.raises(ValueError): - kpss(self.x, 'c', nlags='unknown') + kpss(self.x, "c", nlags="unknown") def test_deprecation(self): with 
pytest.warns(FutureWarning): - kpss(self.x, 'c') + kpss(self.x, "c") def test_pandasacovf(): @@ -630,7 +707,7 @@ def test_pandasacovf(): def test_acovf2d(reset_randomstate): dta = sunspots.load_pandas().data - dta.index = date_range(start='1700', end='2009', freq='A')[:309] + dta.index = date_range(start="1700", end="2009", freq="A")[:309] del dta["YEAR"] res = acovf(dta, fft=False) assert_equal(res, acovf(dta.values, fft=False)) @@ -639,8 +716,8 @@ def test_acovf2d(reset_randomstate): acovf(x, fft=False) [email protected]('demean', [True, False]) [email protected]('adjusted', [True, False]) [email protected]("demean", [True, False]) [email protected]("adjusted", [True, False]) def test_acovf_fft_vs_convolution(demean, adjusted): np.random.seed(1) q = np.random.normal(size=100) @@ -656,25 +733,33 @@ def test_arma_order_select_ic(): # smoke test, assumes info-criteria are right from statsmodels.tsa.arima_process import arma_generate_sample - arparams = np.array([.75, -.25]) - maparams = np.array([.65, .35]) + arparams = np.array([0.75, -0.25]) + maparams = np.array([0.65, 0.35]) arparams = np.r_[1, -arparams] maparam = np.r_[1, maparams] nobs = 250 np.random.seed(2014) y = arma_generate_sample(arparams, maparams, nobs) - res = arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc') + res = arma_order_select_ic(y, ic=["aic", "bic"], trend="nc") # regression tests in case we change algorithm to minic in sas - aic_x = np.array([[np.nan, 552.7342255, 484.29687843], - [562.10924262, 485.5197969, 480.32858497], - [507.04581344, 482.91065829, 481.91926034], - [484.03995962, 482.14868032, 483.86378955], - [481.8849479, 483.8377379, 485.83756612]]) - bic_x = np.array([[np.nan, 559.77714733, 494.86126118], - [569.15216446, 496.08417966, 494.41442864], - [517.61019619, 496.99650196, 499.52656493], - [498.12580329, 499.75598491, 504.99255506], - [499.49225249, 504.96650341, 510.48779255]]) + aic_x = np.array( + [ + [np.nan, 552.7342255, 484.29687843], + [562.10924262, 
485.5197969, 480.32858497], + [507.04581344, 482.91065829, 481.91926034], + [484.03995962, 482.14868032, 483.86378955], + [481.8849479, 483.8377379, 485.83756612], + ] + ) + bic_x = np.array( + [ + [np.nan, 559.77714733, 494.86126118], + [569.15216446, 496.08417966, 494.41442864], + [517.61019619, 496.99650196, 499.52656493], + [498.12580329, 499.75598491, 504.99255506], + [499.49225249, 504.96650341, 510.48779255], + ] + ) aic = DataFrame(aic_x, index=lrange(5), columns=lrange(3)) bic = DataFrame(bic_x, index=lrange(5), columns=lrange(3)) assert_almost_equal(res.aic.values, aic.values, 5) @@ -686,16 +771,17 @@ def test_arma_order_select_ic(): assert_(res.bic.index.equals(bic.index)) assert_(res.bic.columns.equals(bic.columns)) - index = pd.date_range('2000-1-1', freq='M', periods=len(y)) + index = pd.date_range("2000-1-1", freq="M", periods=len(y)) y_series = pd.Series(y, index=index) - res_pd = arma_order_select_ic(y_series, max_ar=2, max_ma=1, - ic=['aic', 'bic'], trend='nc') + res_pd = arma_order_select_ic( + y_series, max_ar=2, max_ma=1, ic=["aic", "bic"], trend="nc" + ) assert_almost_equal(res_pd.aic.values, aic.values[:3, :2], 5) assert_almost_equal(res_pd.bic.values, bic.values[:3, :2], 5) assert_equal(res_pd.aic_min_order, (2, 1)) assert_equal(res_pd.bic_min_order, (1, 1)) - res = arma_order_select_ic(y, ic='aic', trend='nc') + res = arma_order_select_ic(y, ic="aic", trend="nc") assert_almost_equal(res.aic.values, aic.values, 5) assert_(res.aic.index.equals(aic.index)) assert_(res.aic.columns.equals(aic.columns)) @@ -706,17 +792,32 @@ def test_arma_order_select_ic_failure(): # this should trigger an SVD convergence failure, smoke test that it # returns, likely platform dependent failure... # looks like AR roots may be cancelling out for 4, 1? 
- y = np.array([0.86074377817203640006, 0.85316549067906921611, - 0.87104653774363305363, 0.60692382068987393851, - 0.69225941967301307667, 0.73336177248909339976, - 0.03661329261479619179, 0.15693067239962379955, - 0.12777403512447857437, -0.27531446294481976, - -0.24198139631653581283, -0.23903317951236391359, - -0.26000241325906497947, -0.21282920015519238288, - -0.15943768324388354896, 0.25169301564268781179, - 0.1762305709151877342, 0.12678133368791388857, - 0.89755829086753169399, 0.82667068795350151511]) + y = np.array( + [ + 0.86074377817203640006, + 0.85316549067906921611, + 0.87104653774363305363, + 0.60692382068987393851, + 0.69225941967301307667, + 0.73336177248909339976, + 0.03661329261479619179, + 0.15693067239962379955, + 0.12777403512447857437, + -0.27531446294481976, + -0.24198139631653581283, + -0.23903317951236391359, + -0.26000241325906497947, + -0.21282920015519238288, + -0.15943768324388354896, + 0.25169301564268781179, + 0.1762305709151877342, + 0.12678133368791388857, + 0.89755829086753169399, + 0.82667068795350151511, + ] + ) import warnings + with warnings.catch_warnings(): # catch a hessian inversion and convergence failure warning warnings.simplefilter("ignore") @@ -726,7 +827,9 @@ def test_arma_order_select_ic_failure(): def test_acf_fft_dataframe(): # regression test #322 - result = acf(sunspots.load_pandas().data[['SUNACTIVITY']], fft=True, nlags=20) + result = acf( + sunspots.load_pandas().data[["SUNACTIVITY"]], fft=True, nlags=20 + ) assert_equal(result.ndim, 1) @@ -740,29 +843,43 @@ def test_levinson_durbin_acov(): assert_allclose(pacf, np.array([1, rho] + [0] * (m - 1)), atol=1e-8) [email protected]("missing", ['conservative', 'drop', 'raise', 'none']) [email protected]("missing", ["conservative", "drop", "raise", "none"]) @pytest.mark.parametrize("fft", [False, True]) @pytest.mark.parametrize("demean", [True, False]) @pytest.mark.parametrize("adjusted", [True, False]) def test_acovf_nlags(acovf_data, adjusted, demean, fft, 
missing): - full = acovf(acovf_data, adjusted=adjusted, demean=demean, fft=fft, - missing=missing) - limited = acovf(acovf_data, adjusted=adjusted, demean=demean, fft=fft, - missing=missing, nlag=10) + full = acovf( + acovf_data, adjusted=adjusted, demean=demean, fft=fft, missing=missing + ) + limited = acovf( + acovf_data, + adjusted=adjusted, + demean=demean, + fft=fft, + missing=missing, + nlag=10, + ) assert_allclose(full[:11], limited) [email protected]("missing", ['conservative', 'drop']) [email protected]("missing", ["conservative", "drop"]) @pytest.mark.parametrize("fft", [False, True]) @pytest.mark.parametrize("demean", [True, False]) @pytest.mark.parametrize("adjusted", [True, False]) def test_acovf_nlags_missing(acovf_data, adjusted, demean, fft, missing): acovf_data = acovf_data.copy() acovf_data[1:3] = np.nan - full = acovf(acovf_data, adjusted=adjusted, demean=demean, fft=fft, - missing=missing) - limited = acovf(acovf_data, adjusted=adjusted, demean=demean, fft=fft, - missing=missing, nlag=10) + full = acovf( + acovf_data, adjusted=adjusted, demean=demean, fft=fft, missing=missing + ) + limited = acovf( + acovf_data, + adjusted=adjusted, + demean=demean, + fft=fft, + missing=missing, + nlag=10, + ) assert_allclose(full[:11], limited) @@ -786,16 +903,16 @@ def test_pacf2acf_ar(): pacf[0] = 1 pacf[1] = 0.9 ar, acf = levinson_durbin_pacf(pacf) - assert_allclose(acf, 0.9 ** np.arange(10.)) + assert_allclose(acf, 0.9 ** np.arange(10.0)) assert_allclose(ar, pacf[1:], atol=1e-8) ar, acf = levinson_durbin_pacf(pacf, nlags=5) - assert_allclose(acf, 0.9 ** np.arange(6.)) + assert_allclose(acf, 0.9 ** np.arange(6.0)) assert_allclose(ar, pacf[1:6], atol=1e-8) def test_pacf2acf_levinson_durbin(): - pacf = -0.9 ** np.arange(11.) 
+ pacf = -(0.9 ** np.arange(11.0)) pacf[0] = 1 ar, acf = levinson_durbin_pacf(pacf) _, ar_ld, pacf_ld, _, _ = levinson_durbin(acf, 10, isacov=True) @@ -803,13 +920,23 @@ def test_pacf2acf_levinson_durbin(): assert_allclose(pacf, pacf_ld, atol=1e-8) # From R, FitAR, PacfToAR - ar_from_r = [-4.1609, -9.2549, -14.4826, -17.6505, -17.5012, -14.2969, -9.5020, -4.9184, - -1.7911, -0.3486] + ar_from_r = [ + -4.1609, + -9.2549, + -14.4826, + -17.6505, + -17.5012, + -14.2969, + -9.5020, + -4.9184, + -1.7911, + -0.3486, + ] assert_allclose(ar, ar_from_r, atol=1e-4) def test_pacf2acf_errors(): - pacf = -0.9 ** np.arange(11.) + pacf = -(0.9 ** np.arange(11.0)) pacf[0] = 1 with pytest.raises(ValueError): levinson_durbin_pacf(pacf, nlags=20) @@ -847,7 +974,7 @@ def test_innovations_algo_brockwell_davis(): ma = -0.9 acovf = np.array([1 + ma ** 2, ma]) theta, sigma2 = innovations_algo(acovf, nobs=4) - exp_theta = np.array([[0], [-.4972], [-.6606], [-.7404]]) + exp_theta = np.array([[0], [-0.4972], [-0.6606], [-0.7404]]) assert_allclose(theta, exp_theta, rtol=1e-4) assert_allclose(sigma2, [1.81, 1.3625, 1.2155, 1.1436], rtol=1e-4) @@ -875,7 +1002,7 @@ def test_innovations_errors(): with pytest.raises(ValueError): innovations_algo(np.empty((2, 2))) with pytest.raises(TypeError): - innovations_algo(acovf, rtol='none') + innovations_algo(acovf, rtol="none") def test_innovations_filter_brockwell_davis(reset_randomstate): @@ -897,8 +1024,7 @@ def test_innovations_filter_pandas(reset_randomstate): acovf = np.array([1 + (ma ** 2).sum(), ma[0] + ma[1] * ma[0], ma[1]]) theta, _ = innovations_algo(acovf, nobs=10) endog = np.random.randn(10) - endog_pd = pd.Series(endog, - index=pd.date_range('2000-01-01', periods=10)) + endog_pd = pd.Series(endog, index=pd.date_range("2000-01-01", periods=10)) resid = innovations_filter(endog, theta) resid_pd = innovations_filter(endog_pd, theta) assert_allclose(resid, resid_pd.values) @@ -930,8 +1056,9 @@ def 
test_innovations_algo_filter_kalman_filter(reset_randomstate): endog = np.random.normal(size=10) # Innovations algorithm approach - acovf = arma_acovf(np.r_[1, -ar_params], np.r_[1, ma_params], - nobs=len(endog)) + acovf = arma_acovf( + np.r_[1, -ar_params], np.r_[1, ma_params], nobs=len(endog) + ) theta, v = innovations_algo(acovf) u = innovations_filter(endog, theta) @@ -944,8 +1071,9 @@ def test_innovations_algo_filter_kalman_filter(reset_randomstate): # Test that the two approaches are identical atol = 1e-6 if PLATFORM_WIN else 0.0 assert_allclose(u, res.forecasts_error[0], rtol=1e-6, atol=atol) - assert_allclose(theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1], - atol=atol) + assert_allclose( + theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1], atol=atol + ) assert_allclose(llf_obs, res.llf_obs, atol=atol) @@ -954,25 +1082,25 @@ def test_adfuller_short_series(reset_randomstate): res = adfuller(y, store=True) assert res[-1].maxlag == 1 y = np.random.standard_normal(2) - with pytest.raises(ValueError, match='sample size is too short'): + with pytest.raises(ValueError, match="sample size is too short"): adfuller(y) y = np.random.standard_normal(3) - with pytest.raises(ValueError, match='sample size is too short'): - adfuller(y, regression='ct') + with pytest.raises(ValueError, match="sample size is too short"): + adfuller(y, regression="ct") def test_adfuller_maxlag_too_large(reset_randomstate): y = np.random.standard_normal(100) - with pytest.raises(ValueError, match='maxlag must be less than'): + with pytest.raises(ValueError, match="maxlag must be less than"): adfuller(y, maxlag=51) class SetupZivotAndrews(object): # test directory cur_dir = CURR_DIR - run_dir = os.path.join(cur_dir, 'results') + run_dir = os.path.join(cur_dir, "results") # use same file for testing failure modes - fail_file = os.path.join(run_dir, 'rgnp.csv') + fail_file = os.path.join(run_dir, "rgnp.csv") fail_mdl = np.asarray(pd.read_csv(fail_file)) @@ -981,7 +1109,7 @@ class 
TestZivotAndrews(SetupZivotAndrews): # failure mode tests def test_fail_regression_type(self): with pytest.raises(ValueError): - zivot_andrews(self.fail_mdl, regression='x') + zivot_andrews(self.fail_mdl, regression="x") def test_fail_trim_value(self): with pytest.raises(ValueError): @@ -993,62 +1121,76 @@ def test_fail_array_shape(self): def test_fail_autolag_type(self): with pytest.raises(ValueError): - zivot_andrews(self.fail_mdl, autolag='None') + zivot_andrews(self.fail_mdl, autolag="None") - @pytest.mark.parametrize('autolag', ["AIC", "aic", "Aic"]) + @pytest.mark.parametrize("autolag", ["AIC", "aic", "Aic"]) def test_autolag_case_sensitivity(self, autolag): res = zivot_andrews(self.fail_mdl, autolag=autolag) assert res[3] == 1 # following tests compare results to R package urca.ur.za (1.13-0) def test_rgnp_case(self): - res = zivot_andrews(self.fail_mdl, maxlag=8, regression='c', - autolag=None) - assert_allclose([res[0], res[1], res[4]], - [-5.57615, 0.00312, 20], rtol=1e-3) + res = zivot_andrews( + self.fail_mdl, maxlag=8, regression="c", autolag=None + ) + assert_allclose( + [res[0], res[1], res[4]], [-5.57615, 0.00312, 20], rtol=1e-3 + ) def test_gnpdef_case(self): - mdlfile = os.path.join(self.run_dir, 'gnpdef.csv') + mdlfile = os.path.join(self.run_dir, "gnpdef.csv") mdl = np.asarray(pd.read_csv(mdlfile)) - res = zivot_andrews(mdl, maxlag=8, regression='c', autolag='t-stat') - assert_allclose([res[0], res[1], res[3], res[4]], - [-4.12155, 0.28024, 5, 40], rtol=1e-3) + res = zivot_andrews(mdl, maxlag=8, regression="c", autolag="t-stat") + assert_allclose( + [res[0], res[1], res[3], res[4]], + [-4.12155, 0.28024, 5, 40], + rtol=1e-3, + ) def test_stkprc_case(self): - mdlfile = os.path.join(self.run_dir, 'stkprc.csv') + mdlfile = os.path.join(self.run_dir, "stkprc.csv") mdl = np.asarray(pd.read_csv(mdlfile)) - res = zivot_andrews(mdl, maxlag=8, regression='ct', autolag='t-stat') - assert_allclose([res[0], res[1], res[3], res[4]], - [-5.60689, 0.00894, 1, 
65], rtol=1e-3) + res = zivot_andrews(mdl, maxlag=8, regression="ct", autolag="t-stat") + assert_allclose( + [res[0], res[1], res[3], res[4]], + [-5.60689, 0.00894, 1, 65], + rtol=1e-3, + ) def test_rgnpq_case(self): - mdlfile = os.path.join(self.run_dir, 'rgnpq.csv') + mdlfile = os.path.join(self.run_dir, "rgnpq.csv") mdl = np.asarray(pd.read_csv(mdlfile)) - res = zivot_andrews(mdl, maxlag=12, regression='t', autolag='t-stat') - assert_allclose([res[0], res[1], res[3], res[4]], - [-3.02761, 0.63993, 12, 102], rtol=1e-3) + res = zivot_andrews(mdl, maxlag=12, regression="t", autolag="t-stat") + assert_allclose( + [res[0], res[1], res[3], res[4]], + [-3.02761, 0.63993, 12, 102], + rtol=1e-3, + ) def test_rand10000_case(self): - mdlfile = os.path.join(self.run_dir, 'rand10000.csv') + mdlfile = os.path.join(self.run_dir, "rand10000.csv") mdl = np.asarray(pd.read_csv(mdlfile)) - res = zivot_andrews(mdl, regression='c', autolag='t-stat') - assert_allclose([res[0], res[1], res[3], res[4]], - [-3.48223, 0.69111, 25, 7071], rtol=1e-3) + res = zivot_andrews(mdl, regression="c", autolag="t-stat") + assert_allclose( + [res[0], res[1], res[3], res[4]], + [-3.48223, 0.69111, 25, 7071], + rtol=1e-3, + ) def test_acf_conservate_nanops(reset_randomstate): # GH 6729 e = np.random.standard_normal(100) for i in range(1, e.shape[0]): - e[i] += 0.9 * e[i-1] + e[i] += 0.9 * e[i - 1] e[::7] = np.nan result = acf(e, missing="conservative", nlags=10, fft=False) resid = e - np.nanmean(e) expected = np.ones(11) nobs = e.shape[0] gamma0 = np.nansum(resid * resid) - for i in range(1, 10+1): - expected[i] = np.nansum(resid[i:] * resid[:nobs-i]) / gamma0 + for i in range(1, 10 + 1): + expected[i] = np.nansum(resid[i:] * resid[: nobs - i]) / gamma0 assert_allclose(result, expected, rtol=1e-4, atol=1e-4) @@ -1056,3 +1198,19 @@ def test_pacf_nlags_error(reset_randomstate): e = np.random.standard_normal(100) with pytest.raises(ValueError, match="Can only compute partial"): pacf(e, 50) + + +def 
test_coint_auto_tstat(): + rs = np.random.RandomState(3733696641) + x = np.cumsum(rs.standard_normal(100)) + y = np.cumsum(rs.standard_normal(100)) + res = coint( + x, + y, + trend="c", + method="aeg", + maxlag=0, + autolag="t-stat", + return_results=False, + ) + assert np.abs(res[0]) < 1.65
statsmodels.tsa.stattools.coint: UnboundLocalError: local variable 'bestlag' referenced before assignment #### Describe the bug If bestlag=0 and autolag='t-stat' an UnboundLocalError results. If bestlag is >0 and autolag='t-stat' results are normal. An assert or Warning might be nice. #### Code Sample, a copy-pastable example if possible Oddly, this: ```python coint( np.random.random(1000), np.random.random(1000), trend='c', method='aeg', maxlag=0, autolag='t-stat', return_results=False) ``` does not throw an error, but the same args with real accelerometer time series data does. ```python coint( arr[:sdp, i, ch], arr[:sdp, j, ch], trend='c', method='aeg', maxlag=0, autolag='t-stat', return_results=False) Traceback (most recent call last): File "E:\projects\Jan Medical\software\corr_surface\segment_2_34.py", line 961, in <module> main() File "E:\projects\Jan Medical\software\corr_surface\segment_2_34.py", line 883, in main OnSegment(dataObj, filename, textData, recurseLevel=0) File "E:\projects\Jan Medical\software\corr_surface\segment_2_34.py", line 352, in OnSegment coint_res = coint( File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 1392, in coint res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag, File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 267, in adfuller icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag, File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 115, in _autolag return icbest, bestlag nboundLocalError: local variable 'bestlag' referenced before assignment ``` <details> #### Expected Output I'd expect a Warning or AssertionError #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> >>> import statsmodels.api as sm; sm.show_versions() INSTALLED VERSIONS ------------------ Python: 3.8.2.final.0 statsmodels =========== Installed: 0.11.1 (C:\Python38\lib\site-packages\statsmodels) Required Dependencies ===================== cython: Not 
installed numpy: 1.19.0 (C:\Python38\lib\site-packages\numpy) scipy: 1.4.1 (C:\Python38\lib\site-packages\scipy) pandas: 1.0.3 (C:\Python38\lib\site-packages\pandas) dateutil: 2.8.1 (C:\Python38\lib\site-packages\dateutil) patsy: 0.5.1 (C:\Python38\lib\site-packages\patsy) Optional Dependencies ===================== matplotlib: 3.2.0 (C:\Python38\lib\site-packages\matplotlib) backend: TkAgg cvxopt: Not installed joblib: 0.14.1 (C:\Python38\lib\site-packages\joblib) Developer Tools ================ IPython: 7.15.0 (C:\Python38\lib\site-packages\IPython) jinja2: 2.11.2 (C:\Python38\lib\site-packages\jinja2) sphinx: Not installed pygments: 2.6.1 (C:\Python38\lib\site-packages\pygments) pytest: Not installed virtualenv: Not installed </details>
I cannot reproduce on 0.12. What version are you using? Seems impossible to raise in 0.12. I'm going to close, but please post if you are experiencing this on 0.12. It was 0.11.1 I had to do pip install statsmodels=0.12 --force-reinstall I still get the error with 0.12: I added an npz (zipped so it would upload) [arr.zip](https://github.com/statsmodels/statsmodels/files/5163240/arr.zip) ``` >python Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)] on win32 Type "help", "copyright", "credits" or "license" for more information. >>> from statsmodels.tsa.stattools import coint >>> import numpy as np >>> arr= np.load('arr.npz') >>> arr = arr['a'] >>> coint( arr[:1024, 0, 5], arr[:1024, 1, 5], trend='c', method='aeg', maxlag=10, autolag='t-stat', return_results=Fa lse) (-2.0741079140064516, 0.48990605178066554, array([-3.90717771, -3.34210925, -3.04859844])) >>> coint( arr[:1024, 0, 5], arr[:1024, 1, 5], trend='c', method='aeg', maxlag=0, autolag='t-stat', return_results=Fal se) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 1569, in coint res_adf = adfuller( File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 309, in adfuller icbest, bestlag = _autolag( File "C:\Python38\lib\site-packages\statsmodels\tsa\stattools.py", line 143, in _autolag return icbest, bestlag UnboundLocalError: local variable 'bestlag' referenced before assignment ``` ENV: ``` >python Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)] on win32 Type "help", "copyright", "credits" or "license" for more information. 
>>> import statsmodels.api as sm; sm.show_versions() INSTALLED VERSIONS ------------------ Python: 3.8.2.final.0 statsmodels =========== Installed: 0.12.0 (C:\Python38\lib\site-packages\statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.19.1 (C:\Python38\lib\site-packages\numpy) scipy: 1.5.2 (C:\Python38\lib\site-packages\scipy) pandas: 1.1.1 (C:\Python38\lib\site-packages\pandas) dateutil: 2.8.1 (C:\Python38\lib\site-packages\dateutil) patsy: 0.5.1 (C:\Python38\lib\site-packages\patsy) Optional Dependencies ===================== matplotlib: 3.2.0 (C:\Python38\lib\site-packages\matplotlib) backend: TkAgg cvxopt: Not installed joblib: 0.14.1 (C:\Python38\lib\site-packages\joblib) Developer Tools ================ IPython: 7.15.0 (C:\Python38\lib\site-packages\IPython) jinja2: 2.11.2 (C:\Python38\lib\site-packages\jinja2) sphinx: Not installed pygments: 2.6.1 (C:\Python38\lib\site-packages\pygments) pytest: Not installed virtualenv: Not installed ``` With random data, coint is fine: ``` >>> coint( np.random.random(1000), np.random.random(1000), trend='c', method='aeg', maxlag=0, autolag='t-stat', return_results=False) (-30.617368909945277, 0.0, array([-3.90743646, -3.34225305, -3.04869817])) ``` Still can't reproduce ``` >>> from statsmodels.tsa.stattools import coint ...: >>> import numpy as np ...: >>> arr= np.load('arr.npz') ...: >>> arr = arr['a'] In [10]: from statsmodels.tsa.api import coint In [12]: coint( arr[:1024, 0, 5], arr[:1024, 1, 5], trend='c', method='aeg', maxlag=10, autolag='t-stat', return_resu ...: lts=False) Out[12]: (-2.0741079140053276, 0.4899060517812532, array([-3.90717771, -3.34210925, -3.04859844])) ``` Ohh, got it ``` coint( arr[:1024, 0, 5], arr[:1024, 1, 5], trend='c', method='aeg', maxlag=0, autolag='t-stat', return_results=False) ```
"2020-09-04T09:36:11Z"
0.12
[ "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_unknown_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_autolags_does_not_assign_lags_equal_to_nobs", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_store", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_legacy_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_nonvector_input", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_deprecation", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_unclear_hypothesis", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_pval", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_qstat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_none", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_conservative", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_drop", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_raise", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_qstat_none", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_zero_lag", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_finite_check", 
"statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality_single", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_yw", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols_inefficient", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ld", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_store_str", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestCoint_t::test_tstat", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_confint", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_qstat", "statsmodels/tsa/tests/test_stattools.py::test_coint_identical_series", "statsmodels/tsa/tests/test_stattools.py::test_coint", "statsmodels/tsa/tests/test_stattools.py::test_coint_perfect_collinearity", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf2d", 
"statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic_failure", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_pandasacovf", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acf_fft_dataframe", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_filter_kalman_filter", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_short_series", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-conservative]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_error", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-False]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_acovf_warns", "statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_pacf_burg", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-conservative]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_pandas", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_pacf_burg_error", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_rtol", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_ar", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_levinson_durbin_acov", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-False]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_errors", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_levinson_durbin", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_maxlag_too_large", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-raise]", 
"statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_errors", "statsmodels/tsa/tests/test_stattools.py::test_acf_warns", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acf_conservate_nanops", "statsmodels/tsa/tests/test_stattools.py::test_pacf_nlags_error", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_autolag_type", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_regression_type", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rand10000_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnpq_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[aic]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_array_shape", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[Aic]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_gnpdef_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_stkprc_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[AIC]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_trim_value", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnp_case", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_teststat" ]
[ "statsmodels/tsa/tests/test_stattools.py::test_coint_auto_tstat" ]
Python
[]
[]
statsmodels/statsmodels
7,028
statsmodels__statsmodels-7028
[ "7027" ]
3d6a49310c106a5129add818b8d5551c6d84588e
diff --git a/statsmodels/robust/scale.py b/statsmodels/robust/scale.py --- a/statsmodels/robust/scale.py +++ b/statsmodels/robust/scale.py @@ -13,14 +13,15 @@ """ import numpy as np from scipy.stats import norm as Gaussian -from . import norms + from statsmodels.tools import tools from statsmodels.tools.validation import array_like, float_like + +from . import norms from ._qn import _qn -def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median): - # c \approx .6745 +def mad(a, c=Gaussian.ppf(3 / 4.0), axis=0, center=np.median): """ The Median Absolute Deviation along given axis of an array @@ -30,7 +31,7 @@ def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median): Input array. c : float, optional The normalization constant. Defined as scipy.stats.norm.ppf(3/4.), - which is approximately .6745. + which is approximately 0.6745. axis : int, optional The default is 0. Can also be None. center : callable or float @@ -43,19 +44,22 @@ def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median): mad : float `mad` = median(abs(`a` - center))/`c` """ - a = array_like(a, 'a', ndim=None) - c = float_like(c, 'c') + a = array_like(a, "a", ndim=None) + c = float_like(c, "c") if not a.size: - center = 0.0 + center_val = 0.0 elif callable(center): - center = np.apply_over_axes(center, a, axis) + if axis is not None: + center_val = np.apply_over_axes(center, a, axis) + else: + center_val = center(a.ravel()) else: - center = float_like(center, "center") + center_val = float_like(center, "center") - return np.median((np.abs(a-center)) / c, axis=axis) + return np.median((np.abs(a - center_val)) / c, axis=axis) -def iqr(a, c=Gaussian.ppf(3/4) - Gaussian.ppf(1/4), axis=0): +def iqr(a, c=Gaussian.ppf(3 / 4) - Gaussian.ppf(1 / 4), axis=0): """ The normalized interquartile range along given axis of an array @@ -75,8 +79,8 @@ def iqr(a, c=Gaussian.ppf(3/4) - Gaussian.ppf(1/4), axis=0): ------- The normalized interquartile range """ - a = array_like(a, 'a', ndim=None) - c = float_like(c, 
'c') + a = array_like(a, "a", ndim=None) + c = float_like(c, "c") if a.ndim == 0: raise ValueError("a should have at least one dimension") @@ -114,9 +118,10 @@ def qn_scale(a, c=1 / (np.sqrt(2) * Gaussian.ppf(5 / 8)), axis=0): {float, ndarray} The Qn robust estimator of scale """ - a = array_like(a, 'a', ndim=None, dtype=np.float64, contiguous=True, - order='C') - c = float_like(c, 'c') + a = array_like( + a, "a", ndim=None, dtype=np.float64, contiguous=True, order="C" + ) + c = float_like(c, "c") if a.ndim == 0: raise ValueError("a should have at least one dimension") elif a.size == 0: @@ -197,7 +202,7 @@ def __init__(self, c=1.5, tol=1.0e-08, maxiter=30, norm=None): self.tol = tol self.norm = norm tmp = 2 * Gaussian.cdf(c) - 1 - self.gamma = tmp + c**2 * (1 - tmp) - 2 * c * Gaussian.pdf(c) + self.gamma = tmp + c ** 2 * (1 - tmp) - 2 * c * Gaussian.pdf(c) def __call__(self, a, mu=None, initscale=None, axis=0): """ @@ -262,35 +267,44 @@ def _estimate_both(self, a, scale, mu, axis, est_mu, n): # This is a one-step fixed-point estimator # if self.norm == norms.HuberT # It should be faster than using norms.HuberT - nmu = np.clip(a, mu-self.c*scale, - mu+self.c*scale).sum(axis) / a.shape[axis] + nmu = ( + np.clip( + a, mu - self.c * scale, mu + self.c * scale + ).sum(axis) + / a.shape[axis] + ) else: - nmu = norms.estimate_location(a, scale, self.norm, axis, - mu, self.maxiter, self.tol) + nmu = norms.estimate_location( + a, scale, self.norm, axis, mu, self.maxiter, self.tol + ) else: # Effectively, do nothing nmu = mu.squeeze() nmu = tools.unsqueeze(nmu, axis, a.shape) - subset = np.less_equal(np.abs((a - mu)/scale), self.c) + subset = np.less_equal(np.abs((a - mu) / scale), self.c) card = subset.sum(axis) - scale_num = np.sum(subset * (a - nmu)**2, axis) - scale_denom = (n * self.gamma - (a.shape[axis] - card) * self.c**2) + scale_num = np.sum(subset * (a - nmu) ** 2, axis) + scale_denom = n * self.gamma - (a.shape[axis] - card) * self.c ** 2 nscale = 
np.sqrt(scale_num / scale_denom) nscale = tools.unsqueeze(nscale, axis, a.shape) - test1 = np.alltrue(np.less_equal(np.abs(scale - nscale), - nscale * self.tol)) + test1 = np.alltrue( + np.less_equal(np.abs(scale - nscale), nscale * self.tol) + ) test2 = np.alltrue( - np.less_equal(np.abs(mu - nmu), nscale * self.tol)) + np.less_equal(np.abs(mu - nmu), nscale * self.tol) + ) if not (test1 and test2): mu = nmu scale = nscale else: return nmu.squeeze(), nscale.squeeze() - raise ValueError('joint estimation of location and scale failed ' - 'to converge in %d iterations' % self.maxiter) + raise ValueError( + "joint estimation of location and scale failed " + "to converge in %d iterations" % self.maxiter + ) huber = Huber() @@ -331,16 +345,22 @@ class HuberScale(object): and the Huber constant h = (n-p)/n*(d**2 + (1-d**2)*\ scipy.stats.norm.cdf(d) - .5 - d*sqrt(2*pi)*exp(-0.5*d**2) """ + def __init__(self, d=2.5, tol=1e-08, maxiter=30): self.d = d self.tol = tol self.maxiter = maxiter def __call__(self, df_resid, nobs, resid): - h = df_resid / nobs * ( - self.d ** 2 - + (1 - self.d ** 2) * Gaussian.cdf(self.d) - - .5 - self.d / (np.sqrt(2 * np.pi)) * np.exp(-.5 * self.d ** 2) + h = ( + df_resid + / nobs + * ( + self.d ** 2 + + (1 - self.d ** 2) * Gaussian.cdf(self.d) + - 0.5 + - self.d / (np.sqrt(2 * np.pi)) * np.exp(-0.5 * self.d ** 2) + ) ) s = mad(resid) @@ -348,15 +368,22 @@ def subset(x): return np.less(np.abs(resid / x), self.d) def chi(s): - return subset(s) * (resid / s) ** 2 / 2 + (1 - subset(s)) * \ - (self.d ** 2 / 2) + return subset(s) * (resid / s) ** 2 / 2 + (1 - subset(s)) * ( + self.d ** 2 / 2 + ) scalehist = [np.inf, s] niter = 1 - while (np.abs(scalehist[niter - 1] - scalehist[niter]) > self.tol - and niter < self.maxiter): - nscale = np.sqrt(1 / (nobs * h) * np.sum(chi(scalehist[-1])) * - scalehist[-1] ** 2) + while ( + np.abs(scalehist[niter - 1] - scalehist[niter]) > self.tol + and niter < self.maxiter + ): + nscale = np.sqrt( + 1 + / (nobs * h) + * 
np.sum(chi(scalehist[-1])) + * scalehist[-1] ** 2 + ) scalehist.append(nscale) niter += 1 # TODO: raise on convergence failure?
diff --git a/statsmodels/robust/tests/test_scale.py b/statsmodels/robust/tests/test_scale.py --- a/statsmodels/robust/tests/test_scale.py +++ b/statsmodels/robust/tests/test_scale.py @@ -5,25 +5,52 @@ import numpy as np from numpy.random import standard_normal from numpy.testing import assert_almost_equal, assert_equal -from scipy.stats import norm as Gaussian import pytest -# Example from Section 5.5, Venables & Ripley (2002) +from scipy.stats import norm as Gaussian -import statsmodels.robust.scale as scale import statsmodels.api as sm +import statsmodels.robust.scale as scale +from statsmodels.robust.scale import mad + +# Example from Section 5.5, Venables & Ripley (2002) + DECIMAL = 4 # TODO: Can replicate these tests using stackloss data and R if this -# data is a problem +# data is a problem class TestChem(object): @classmethod def setup_class(cls): - cls.chem = np.array([ - 2.20, 2.20, 2.4, 2.4, 2.5, 2.7, 2.8, 2.9, 3.03, - 3.03, 3.10, 3.37, 3.4, 3.4, 3.4, 3.5, 3.6, 3.7, 3.7, 3.7, 3.7, - 3.77, 5.28, 28.95]) + cls.chem = np.array( + [ + 2.20, + 2.20, + 2.4, + 2.4, + 2.5, + 2.7, + 2.8, + 2.9, + 3.03, + 3.03, + 3.10, + 3.37, + 3.4, + 3.4, + 3.4, + 3.5, + 3.6, + 3.7, + 3.7, + 3.7, + 3.7, + 3.77, + 5.28, + 28.95, + ] + ) def test_mean(self): assert_almost_equal(np.mean(self.chem), 4.2804, DECIMAL) @@ -50,10 +77,12 @@ def test_huber_huberT(self): n = scale.norms.HuberT() n.t = 1.5 h = scale.Huber(norm=n) - assert_almost_equal(scale.huber(self.chem)[0], h(self.chem)[0], - DECIMAL) - assert_almost_equal(scale.huber(self.chem)[1], h(self.chem)[1], - DECIMAL) + assert_almost_equal( + scale.huber(self.chem)[0], h(self.chem)[0], DECIMAL + ) + assert_almost_equal( + scale.huber(self.chem)[1], h(self.chem)[1], DECIMAL + ) def test_huber_Hampel(self): hh = scale.Huber(norm=scale.norms.Hampel()) @@ -84,10 +113,11 @@ def test_mad_center(self): assert_equal(n.shape, (10,)) with pytest.raises(TypeError): scale.mad(self.X, center=None) - assert_almost_equal(scale.mad(self.X, 
center=1), - np.median(np.abs(self.X - 1), - axis=0)/Gaussian.ppf(3/4.), - DECIMAL) + assert_almost_equal( + scale.mad(self.X, center=1), + np.median(np.abs(self.X - 1), axis=0) / Gaussian.ppf(3 / 4.0), + DECIMAL, + ) class TestMadAxes(object): @@ -169,22 +199,30 @@ def setup_class(cls): cls.sunspot = sm.datasets.sunspots.load_pandas().data.SUNACTIVITY def test_qn_naive(self): - assert_almost_equal(scale.qn_scale(self.normal), - scale._qn_naive(self.normal), DECIMAL) - assert_almost_equal(scale.qn_scale(self.range), - scale._qn_naive(self.range), DECIMAL) - assert_almost_equal(scale.qn_scale(self.exponential), - scale._qn_naive(self.exponential), DECIMAL) + assert_almost_equal( + scale.qn_scale(self.normal), scale._qn_naive(self.normal), DECIMAL + ) + assert_almost_equal( + scale.qn_scale(self.range), scale._qn_naive(self.range), DECIMAL + ) + assert_almost_equal( + scale.qn_scale(self.exponential), + scale._qn_naive(self.exponential), + DECIMAL, + ) def test_qn_robustbase(self): # from R's robustbase with finite.corr = FALSE assert_almost_equal(scale.qn_scale(self.range), 13.3148, DECIMAL) - assert_almost_equal(scale.qn_scale(self.stackloss), - np.array([8.87656, 8.87656, 2.21914, 4.43828]), - DECIMAL) + assert_almost_equal( + scale.qn_scale(self.stackloss), + np.array([8.87656, 8.87656, 2.21914, 4.43828]), + DECIMAL, + ) # sunspot.year from datasets in R only goes up to 289 - assert_almost_equal(scale.qn_scale(self.sunspot[0:289]), 33.50901, - DECIMAL) + assert_almost_equal( + scale.qn_scale(self.sunspot[0:289]), 33.50901, DECIMAL + ) def test_qn_empty(self): empty = np.empty(0) @@ -255,3 +293,18 @@ def test_axis2(self): def test_axisneg1(self): m, s = self.h(self.X, axis=-1) assert_equal(m.shape, (40, 10)) + + +def test_mad_axis_none(): + # GH 7027 + a = np.array([[0, 1, 2], [2, 3, 2]]) + + def m(x): + return np.median(x) + + direct = mad(a=a, axis=None) + custom = mad(a=a, axis=None, center=m) + axis0 = mad(a=a.ravel(), axis=0) + + 
np.testing.assert_allclose(direct, custom) + np.testing.assert_allclose(direct, axis0)
BUG: error on passing axis=None to mad #### Describe the bug Setting axis=None in the mad function raises "TypeError: '<' not supported between instances of 'NoneType' and 'int'". According to the docs, axis=None is supported. The docs don't say so, but I guess the expected behaviour would be to have the mad applied on the flattened input array (like what np.sum does for axis=None for example). I'd be happy to write a fix for this. #### Code Sample ```python import numpy as np from statsmodels.robust.scale import mad a = np.array([[0, 1, 2],[2, 3, 2]]) mad(a=a, axis=None) # raises TypeError mad(a.flatten()) # this is what I would expect the output to be ``` <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `master`. If your problem has been fixed in an unreleased version, you might be able to use `master` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the master branch of this repository? It helps the limited resources if we know problems exist in the current master so that they do not need to check whether the code sample produces a bug in the next release. </details> #### Expected Output The mad function applied to the flattened input array. 
#### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.8.2.final.0 OS: Linux 5.4.0-45-generic #49-Ubuntu SMP Wed Aug 26 13:38:52 UTC 2020 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: v0.13.0.dev0+18.g3d6a49310 (/home/esmucler/Packages/statsmodels/statsmodels) Required Dependencies ===================== cython: 0.29.21 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/Cython) numpy: 1.19.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/numpy) scipy: 1.5.2 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/scipy) pandas: 1.1.0 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/pandas) dateutil: 2.8.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/dateutil) patsy: 0.5.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.3.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/matplotlib) backend: agg cvxopt: Not installed joblib: Not installed Developer Tools ================ IPython: 7.17.0 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/IPython) jinja2: 2.11.2 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/jinja2) sphinx: 3.2.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/sphinx) pygments: 2.6.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/pygments) pytest: 6.0.1 (/home/esmucler/Packages/statsmodels/venv/lib/python3.8/site-packages/pytest) virtualenv: Not installed </details>
"2020-09-06T22:35:19Z"
0.12
[ "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestMadAxes::test_axis0", "statsmodels/robust/tests/test_scale.py::TestMad::test_mad_empty", "statsmodels/robust/tests/test_scale.py::TestMad::test_mad", "statsmodels/robust/tests/test_scale.py::TestMad::test_mad_center", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_location", "statsmodels/robust/tests/test_scale.py::TestChem::test_qn", "statsmodels/robust/tests/test_scale.py::TestChem::test_iqr", "statsmodels/robust/tests/test_scale.py::TestChem::test_mean", "statsmodels/robust/tests/test_scale.py::TestChem::test_median", "statsmodels/robust/tests/test_scale.py::TestChem::test_mad", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_Hampel", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_scale", "statsmodels/robust/tests/test_scale.py::TestChem::test_huber_huberT", "statsmodels/robust/tests/test_scale.py::TestHuber::test_huber_result_shape", "statsmodels/robust/tests/test_scale.py::TestIqr::test_iqr_empty", "statsmodels/robust/tests/test_scale.py::TestIqr::test_iqr", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axis0", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestIqrAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestQnAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestQnAxes::test_axis0", "statsmodels/robust/tests/test_scale.py::TestQnAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestQnAxes::test_axisneg1", "statsmodels/robust/tests/test_scale.py::TestQn::test_qn_naive", "statsmodels/robust/tests/test_scale.py::TestQn::test_qn_robustbase", 
"statsmodels/robust/tests/test_scale.py::TestQn::test_qn_empty", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_axis1", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_default", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_axis2", "statsmodels/robust/tests/test_scale.py::TestHuberAxes::test_axisneg1" ]
[ "statsmodels/robust/tests/test_scale.py::test_mad_axis_none" ]
Python
[]
[]
statsmodels/statsmodels
7,055
statsmodels__statsmodels-7055
[ "7053" ]
2322d3597a65c6be1f07b36bf15a4bc7c927213b
diff --git a/statsmodels/tsa/holtwinters/results.py b/statsmodels/tsa/holtwinters/results.py --- a/statsmodels/tsa/holtwinters/results.py +++ b/statsmodels/tsa/holtwinters/results.py @@ -743,10 +743,11 @@ def simulate( if use_boxcox: y = inv_boxcox(y, lamda) - sim = np.squeeze(y) + sim = np.atleast_1d(np.squeeze(y)) + if y.shape[0] == 1 and y.size > 1: + sim = sim[None, :] # Wrap data / squeeze where appropriate - use_pandas = isinstance(self.model.data, PandasData) - if not use_pandas: + if not isinstance(self.model.data, PandasData): return sim _, _, _, index = self.model._get_prediction_index(
diff --git a/statsmodels/tsa/holtwinters/tests/test_holtwinters.py b/statsmodels/tsa/holtwinters/tests/test_holtwinters.py --- a/statsmodels/tsa/holtwinters/tests/test_holtwinters.py +++ b/statsmodels/tsa/holtwinters/tests/test_holtwinters.py @@ -358,7 +358,10 @@ def test_ndarray(self): @pytest.mark.xfail(reason="Optimizer does not converge", strict=False) def test_forecast(self): fit1 = ExponentialSmoothing( - self.aust, seasonal_periods=4, trend="add", seasonal="add", + self.aust, + seasonal_periods=4, + trend="add", + seasonal="add", ).fit(method="bh", use_brute=True) assert_almost_equal( fit1.forecast(steps=4), [60.9542, 36.8505, 46.1628, 50.1272], 3 @@ -884,8 +887,10 @@ def test_float_boxcox(trend, seasonal): assert_allclose(res.params["use_boxcox"], 0.5) with pytest.warns(FutureWarning): res = ExponentialSmoothing( - housing_data, trend=trend, seasonal=seasonal - ).fit(use_boxcox = 0.5) + housing_data, + trend=trend, + seasonal=seasonal, + ).fit(use_boxcox=0.5) assert_allclose(res.params["use_boxcox"], 0.5) @@ -1624,7 +1629,10 @@ def test_error_initialization(ses): ExponentialSmoothing(ses, initial_seasonal=[1.0, 0.2, 0.05, 4]) with pytest.raises(ValueError): ExponentialSmoothing( - ses, trend="add", initialization_method="known", initial_level=1.0, + ses, + trend="add", + initialization_method="known", + initial_level=1.0, ) with pytest.raises(ValueError): ExponentialSmoothing( @@ -1963,3 +1971,28 @@ def test_boxcox_components(ses): assert not hasattr(res, "_untransformed_level") assert not hasattr(res, "_untransformed_trend") assert not hasattr(res, "_untransformed_seasonal") + + [email protected]("repetitions", [1, 10]) [email protected]("random_errors", [None, "bootstrap"]) +def test_forecast_1_simulation(austourists, random_errors, repetitions): + # GH 7053 + fit = ExponentialSmoothing( + austourists, + seasonal_periods=4, + trend="add", + seasonal="add", + damped_trend=True, + initialization_method="estimated", + ).fit() + + sim = fit.simulate( + 1, 
anchor=0, random_errors=random_errors, repetitions=repetitions + ) + expected_shape = (1,) if repetitions == 1 else (1, repetitions) + assert sim.shape == expected_shape + sim = fit.simulate( + 10, anchor=0, random_errors=random_errors, repetitions=repetitions + ) + expected_shape = (10,) if repetitions == 1 else (10, repetitions) + assert sim.shape == expected_shape
HoltWintersResults simulation throws ValueError ### **Describe:** I use the following example codes given on the page (https://www.statsmodels.org/stable/examples/notebooks/generated/exponential_smoothing.html): fit = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='mul', initialization_method="estimated").fit() simulations = fit.simulate(16, anchor='2009-01-01', repetitions=100, error='mul', random_errors='bootstrap') I just changed `nsimulations` parameter from `16` to `1` and ran the codes again, then `ValueError` was thrown: ValueError Traceback (most recent call last) e:\python36\lib\site-packages\pandas\core\internals\managers.py in create_block_manager_from_blocks(blocks, axes) 1652 -> 1653 mgr = BlockManager(blocks, axes) 1654 mgr._consolidate_inplace() e:\python36\lib\site-packages\pandas\core\internals\managers.py in __init__(self, blocks, axes, do_integrity_check) 113 if do_integrity_check: --> 114 self._verify_integrity() 115 e:\python36\lib\site-packages\pandas\core\internals\managers.py in _verify_integrity(self) 310 if block._verify_integrity and block.shape[1:] != mgr_shape[1:]: --> 311 construction_error(tot_items, block.shape[1:], self.axes) 312 if len(self.items) != tot_items: e:\python36\lib\site-packages\pandas\core\internals\managers.py in construction_error(tot_items, block_shape, axes, e) 1690 raise ValueError("Shape of passed values is {0}, indices imply {1}".format( -> 1691 passed, implied)) 1692 ValueError: Shape of passed values is (100, 1), indices imply (1, 1) During handling of the above exception, another exception occurred: ValueError Traceback (most recent call last) <ipython-input-13-1c647f688c20> in <module> 1 fit = ExponentialSmoothing(aust, seasonal_periods=4, trend='add', seasonal='mul', initialization_method="estimated").fit() ----> 2 simulations = fit.simulate(1, anchor='2009-01-01', repetitions=100, error='mul', random_errors='bootstrap') 
e:\python36\lib\site-packages\statsmodels\tsa\holtwinters\results.py in simulate(self, nsimulations, anchor, repetitions, error, random_errors, random_state) 756 sim = pd.Series(sim, index=index, name=self.model.endog_names) 757 else: --> 758 sim = pd.DataFrame(sim, index=index) 759 760 return sim e:\python36\lib\site-packages\pandas\core\frame.py in __init__(self, data, index, columns, dtype, copy) 422 else: 423 mgr = init_ndarray(data, index, columns, dtype=dtype, --> 424 copy=copy) 425 426 # For data is list-like, or Iterable (will consume into list) e:\python36\lib\site-packages\pandas\core\internals\construction.py in init_ndarray(values, index, columns, dtype, copy) 165 values = maybe_infer_to_datetimelike(values) 166 --> 167 return create_block_manager_from_blocks([values], [columns, index]) 168 169 e:\python36\lib\site-packages\pandas\core\internals\managers.py in create_block_manager_from_blocks(blocks, axes) 1658 blocks = [getattr(b, 'values', b) for b in blocks] 1659 tot_items = sum(b.shape[0] for b in blocks) -> 1660 construction_error(tot_items, blocks[0].shape[1:], axes, e) 1661 1662 e:\python36\lib\site-packages\pandas\core\internals\managers.py in construction_error(tot_items, block_shape, axes, e) 1689 raise ValueError("Empty data passed with indices specified.") 1690 raise ValueError("Shape of passed values is {0}, indices imply {1}".format( -> 1691 passed, implied)) 1692 1693 ValueError: Shape of passed values is (100, 1), indices imply (1, 1) Why can `nsimulations` not be `1` ? ### **Versions** pandas 0.24.2 numpy 1.16.4 statsmodels 0.12.0
"2020-09-25T22:27:16Z"
0.12
[ "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt_damp_fit", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt_damp_r", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal_buggy", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_ndarray", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal_add_mul", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_simple_exp_smoothing", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_predict", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_2d_data", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_debiased", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_direct_holt_add", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-estimated]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_summary_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[None-1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[Powell]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[SLSQP]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_minimizer_kwargs_error", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-None]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[trust-constr]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_valid_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_infer_freq", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[100]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[2000]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_bad_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_dampen", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[TNC]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_seasonal[mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-None]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_damping_trend_zero", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_basin_hopping", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[basinhopping]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[period]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_infeasible_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_no_params_to_optimize", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-add]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[bootstrap-1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fix_unfixable", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_boxcox_components", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[L-BFGS-B]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[1000]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fixed_errors", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fixed_basic", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_attributes", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[irregular]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[range]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_start_param_length", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-mul]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_initialization", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_integer_array", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[date_range]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_keywords", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_seasonal[add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-add]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[least_squares]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fix_set_parameters" ]
[ "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[None-10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[bootstrap-10]" ]
Python
[]
[]
statsmodels/statsmodels
7,228
statsmodels__statsmodels-7228
[ "7127" ]
a058516c5f1517a03877cf77f158370c7d1a9d39
diff --git a/statsmodels/discrete/discrete_model.py b/statsmodels/discrete/discrete_model.py --- a/statsmodels/discrete/discrete_model.py +++ b/statsmodels/discrete/discrete_model.py @@ -774,7 +774,10 @@ def __init__(self, endog, exog, offset=None, exposure=None, missing='none', super().__init__(endog, exog, check_rank, missing=missing, offset=offset, exposure=exposure, **kwargs) if exposure is not None: + self.exposure = np.asarray(self.exposure) self.exposure = np.log(self.exposure) + if offset is not None: + self.offset = np.asarray(self.offset) self._check_inputs(self.offset, self.exposure, self.endog) if offset is None: delattr(self, 'offset')
diff --git a/statsmodels/discrete/tests/test_count_model.py b/statsmodels/discrete/tests/test_count_model.py --- a/statsmodels/discrete/tests/test_count_model.py +++ b/statsmodels/discrete/tests/test_count_model.py @@ -4,6 +4,7 @@ from numpy.testing import (assert_, assert_equal, assert_array_equal, assert_allclose) import pytest +import pandas as pd import statsmodels.api as sm from .results.results_discrete import RandHIE @@ -67,7 +68,6 @@ def test_summary(self): # GH 4581 assert 'Covariance Type:' in str(summ) - class TestZeroInflatedModel_logit(CheckGeneric): @classmethod def setup_class(cls): @@ -643,3 +643,19 @@ def test_zero_nonzero_mean(self): mean2 = ((1 - self.res.predict(which='prob-zero').mean()) * self.res.predict(which='mean-nonzero').mean()) assert_allclose(mean1, mean2, atol=0.2) + + +class TestPandasOffset: + + def test_pd_offset_exposure(self): + endog = pd.DataFrame({'F': [0.0, 0.0, 0.0, 0.0, 1.0]}) + exog = pd.DataFrame({'I': [1.0, 1.0, 1.0, 1.0, 1.0], + 'C': [0.0, 1.0, 0.0, 1.0, 0.0]}) + exposure = pd.Series([1., 1, 1, 2, 1]) + offset = pd.Series([1, 1, 1, 2, 1]) + sm.Poisson(endog=endog, exog=exog, offset=offset).fit() + inflations = ['logit', 'probit'] + for inflation in inflations: + sm.ZeroInflatedPoisson(endog=endog, exog=exog["I"], + exposure=exposure, + inflation=inflation).fit()
BUG: fix error in Poisson when offset or exposure is a pd.Series - [X] closes #7015 - [X] tests added / passed. - [X] code/documentation is well formatted. - [X] properly formatted commit message. See [NumPy's guide](https://docs.scipy.org/doc/numpy-1.15.1/dev/gitwash/development_workflow.html#writing-the-commit-message). <details> Just do np.asarray in the __init__ method of CountModel. Added a test that fails on master but passes here **Notes**: * It is essential that you add a test when making code changes. Tests are not needed for doc changes. * When adding a new function, test values should usually be verified in another package (e.g., R/SAS/Stata). * When fixing a bug, you must add a test that would produce the bug in master and then show that it is fixed with the new code. * New code additions must be well formatted. Changes should pass flake8. If on Linux or OSX, you can verify you changes are well formatted by running ``` git diff upstream/master -u -- "*.py" | flake8 --diff --isolated ``` assuming `flake8` is installed. This command is also available on Windows using the Windows System for Linux once `flake8` is installed in the local Linux environment. While passing this test is not required, it is good practice and it help improve code quality in `statsmodels`. * Docstring additions must render correctly, including escapes and LaTeX. </details>
"2020-12-22T20:18:14Z"
0.12
[ "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedPoisson_predict::test_var", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedPoisson_predict::test_predict_prob", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedPoisson_predict::test_mean", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_conf_int", 
"statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_names", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_minimize", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_params", 
"statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict2::test_zero_nonzero_mean", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict2::test_mean", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_exposure", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_bic", 
"statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson_predict::test_predict_prob", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson_predict::test_mean", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson_predict::test_var", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict::test_mean", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict::test_predict_prob", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict::test_var", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict::test_predict_generic_zi", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_aic", 
"statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_minimize", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_t" ]
[ "statsmodels/discrete/tests/test_count_model.py::TestPandasOffset::test_pd_offset_exposure" ]
Python
[]
[]
statsmodels/statsmodels
7,250
statsmodels__statsmodels-7250
[ "7191" ]
66dc957159f375d5c101f085b421a88a4c8e1c38
diff --git a/statsmodels/tsa/arima/model.py b/statsmodels/tsa/arima/model.py --- a/statsmodels/tsa/arima/model.py +++ b/statsmodels/tsa/arima/model.py @@ -8,6 +8,8 @@ import warnings +import numpy as np + from statsmodels.tools.data import _is_using_pandas from statsmodels.tsa.statespace import sarimax from statsmodels.tsa.statespace.kalman_filter import MEMORY_CONSERVE @@ -135,6 +137,22 @@ def __init__(self, endog, exog=None, order=(0, 0, 0), validate_specification=validate_specification) exog = self._spec_arima._model.data.orig_exog + # Raise an error if we have a constant in an integrated model + + has_trend = len(self._spec_arima.trend_terms) > 0 + if has_trend: + lowest_trend = np.min(self._spec_arima.trend_terms) + if lowest_trend < order[1] + seasonal_order[1]: + raise ValueError( + 'In models with integration (`d > 0`) or seasonal' + ' integration (`D > 0`), trend terms of lower order than' + ' `d + D` cannot be (as they would be eliminated due to' + ' the differencing operation). For example, a constant' + ' cannot be included in an ARIMA(1, 1, 1) model, but' + ' including a linear trend, which would have the same' + ' effect as fitting a constant to the differenced data,' + ' is allowed.') + # Keep the given `exog` by removing the prepended trend variables input_exog = None if exog is not None: diff --git a/statsmodels/tsa/statespace/sarimax.py b/statsmodels/tsa/statespace/sarimax.py --- a/statsmodels/tsa/statespace/sarimax.py +++ b/statsmodels/tsa/statespace/sarimax.py @@ -1732,7 +1732,7 @@ def _get_extension_time_varying_matrices( # Create extended model if extend_kwargs is None: extend_kwargs = {} - if not self.simple_differencing and self._k_trend > 0: + if not self.simple_differencing and self.k_trend > 0: extend_kwargs.setdefault( 'trend_offset', self.trend_offset + self.nobs) extend_kwargs.setdefault('validate_specification', False)
diff --git a/statsmodels/tsa/arima/tests/test_model.py b/statsmodels/tsa/arima/tests/test_model.py --- a/statsmodels/tsa/arima/tests/test_model.py +++ b/statsmodels/tsa/arima/tests/test_model.py @@ -239,10 +239,59 @@ def test_clone(): endog = dta['realgdp'].iloc[:100] exog = np.arange(endog.shape[0]) check_cloned(ARIMA(endog, order=(2, 1, 1), seasonal_order=(1, 1, 2, 4), - exog=exog, trend='c', concentrate_scale=True), + exog=exog, trend=[0, 0, 1], concentrate_scale=True), endog, exog=exog) +def test_constant_integrated_model_error(): + with pytest.raises(ValueError, match="In models with integration"): + ARIMA(np.ones(100), order=(1, 1, 0), trend='c') + + with pytest.raises(ValueError, match="In models with integration"): + ARIMA(np.ones(100), order=(1, 0, 0), seasonal_order=(1, 1, 0, 6), + trend='c') + + with pytest.raises(ValueError, match="In models with integration"): + ARIMA(np.ones(100), order=(1, 2, 0), trend='t') + + with pytest.raises(ValueError, match="In models with integration"): + ARIMA(np.ones(100), order=(1, 1, 0), seasonal_order=(1, 1, 0, 6), + trend='t') + + +def test_forecast(): + # Numpy + endog = dta['infl'].iloc[:100].values + + mod = ARIMA(endog[:50], order=(1, 1, 0), trend='t') + res = mod.filter([0.2, 0.3, 1.0]) + + endog2 = endog.copy() + endog2[50:] = np.nan + mod2 = mod.clone(endog2) + res2 = mod2.filter(res.params) + + assert_allclose(res.forecast(50), res2.fittedvalues[-50:]) + + +def test_forecast_with_exog(): + # Numpy + endog = dta['infl'].iloc[:100].values + exog = np.arange(len(endog))**2 + + mod = ARIMA(endog[:50], order=(1, 1, 0), exog=exog[:50], trend='t') + res = mod.filter([0.2, 0.05, 0.3, 1.0]) + + endog2 = endog.copy() + endog2[50:] = np.nan + mod2 = mod.clone(endog2, exog=exog) + print(mod.param_names) + print(mod2.param_names) + res2 = mod2.filter(res.params) + + assert_allclose(res.forecast(50, exog=exog[50:]), res2.fittedvalues[-50:]) + + def test_append(): endog = dta['infl'].iloc[:100].values mod = ARIMA(endog[:50], 
trend='c') @@ -267,6 +316,19 @@ def test_append_with_exog(): assert_allclose(res2.llf, res_e.llf) +def test_append_with_exog_and_trend(): + # Numpy + endog = dta['infl'].iloc[:100].values + exog = np.arange(len(endog))**2 + mod = ARIMA(endog[:50], exog=exog[:50], trend='ct') + res = mod.fit() + res_e = res.append(endog[50:], exog=exog[50:]) + mod2 = ARIMA(endog, exog=exog, trend='ct') + res2 = mod2.filter(res_e.params) + + assert_allclose(res2.llf, res_e.llf) + + def test_append_with_exog_pandas(): # Pandas endog = dta['infl'].iloc[:100]
ENH: Check trend when d > 0 in ARIMA-type models HI All, I am new to python so forgive me if I did not explain properly. I am reading a python time series forecast book. In it they talk of the Arima model and show the code to estimate the model. The code starts with from statsmodels.tsa.arima_model import ARIMA to specify the model: model = ARIMA(series, order=(5,1,0)) model_fit = model.fit(disp=0) When the results are printed out for the model(print(model_fit.summary() I get the following warning: ![Capture2](https://user-images.githubusercontent.com/75435215/101043455-c7010a00-3597-11eb-878a-a2e5ec1b054b.PNG) The results of the model are as follows: ![Capture3](https://user-images.githubusercontent.com/75435215/101043607-edbf4080-3597-11eb-8516-3d076b531f28.PNG) So when I go ahead and change the library above to read: from statsmodels.tsa.arima.model import ARIMA and run the arima model again, I get the following results: ![Capture4](https://user-images.githubusercontent.com/75435215/101043834-2a8b3780-3598-11eb-84e5-54fc7671f823.PNG) As you can see, the second results have no relation to what was obtained before. Does anyone have any idea why the new ARIMA is different than the old and how I can get the new one to give me the same results as the older version? Many thanks to you all.
The old method seems to have found a better set of parameters. Looks at the log likelihood, it is a little bit higher. You could try your own starting values and see what happens. I would guess the fits are pretty similar despite the differences in parameter values. Another thing is that the old `ARIMA` model always included a constant by default. In the new model, a constant is only included by default in stationary models (i.e. models in which `d = 0`, while you have `d=1`). To replicate the old behavior, you can do: ```python model = ARIMA(series, order=(5,1,0), trend='c') ``` Closing since I think this is answered, but feel free to follow up if you have questions. Thank you for the replies. I tried adding a constant as advised but things did not change much in terms of estimated coefficients. For example, when you calculate the average of the residuals in the older model it was -5, but the new model (even with the constant) is +22. Can you share your data? Also, what do you get when you run ``` AutoReg(Sales.diff().dropna(),5).fit().params ``` which uses OLS to estimate the model parameters. Assumes `Sales` is a `pd.Series`. > > > Can you share your data? Thanks a bunch Kevin. would love to. "Yr-Month","Sales" "1-01",266.0 "1-02",145.9 "1-03",183.1 "1-04",119.3 "1-05",180.3 "1-06",168.5 "1-07",231.8 "1-08",224.5 "1-09",192.8 "1-10",122.9 "1-11",336.5 "1-12",185.9 "2-01",194.3 "2-02",149.5 "2-03",210.1 "2-04",273.3 "2-05",191.4 "2-06",287.0 "2-07",226.0 "2-08",303.6 "2-09",289.9 "2-10",421.6 "2-11",264.5 "2-12",342.3 "3-01",339.7 "3-02",440.4 "3-03",315.9 "3-04",439.3 "3-05",401.3 "3-06",437.4 "3-07",575.5 "3-08",407.6 "3-09",682.0 "3-10",475.3 "3-11",581.3 "3-12",646.9 The book is a great time series book. My problem is that if parts of the model is deprecated, then I need to make sure I can still replicate the results of the book with the 'new' and updated model in python. 
> > > Also, what do you get when you run > > ``` > AutoReg(Sales.diff().dropna(),5).fit().params > ``` > > which uses OLS to estimate the model parameters. Assumes `Sales` is a `pd.Series`. intercept 49.014374 Sales.L1 -1.201207 Sales.L2 -0.763247 Sales.L3 -0.462942 Sales.L4 -0.190935 Sales.L5 0.136667 dtype: float64 This seems to give me closer coefficient estimates with the original deprecated model With the new model: ``` ARIMA(Sales.diff().dropna(),order=(5,0,0),trend="c").fit().params Out[127]: const 12.065186 ar.L1 -1.108131 ar.L2 -0.620273 ar.L3 -0.360547 ar.L4 -0.125167 ar.L5 0.128924 sigma2 4126.983200 dtype: float64 ``` The devil is in the details. You have to set the trend correctly: ``` ARIMA(Sales,order=(5,1,0),trend="t").fit().params Out[138]: x1 12.066901 ar.L1 -1.108919 ar.L2 -0.621157 ar.L3 -0.361282 ar.L4 -0.125640 ar.L5 0.128800 sigma2 4127.586987 dtype: float64 ``` @ChadFulton Sorry to bring you back in. This seems a little surprising to me. Is this expected -- that the included strand is difference. I noticed that the intercept when trend="c" was completely unstable which suggested to be that is the effectively being dropped. Is this the expected behavior? @bashtage thanks for bringing this up! I had forgotten about this difference. This is expected in the sense that `SARIMAX` (and so also `arima.ARIMA`) models the `trend` term relative to the original dataset. So you're right, for an integrated model, the constant term isn't identified, which is why it is not included by default. As one point of support for what we do: this behavior matches how R interprets trend terms. e.g. 
the base `arima` function ignores the `include.mean` argument if the model has integration, and the `forecast` package's `Arima` function does what `SARIMAX` does, meaning that to match the results here, you would need to include a drift term and not a constant term: ```R Arima(Sales, order=c(5, 1, 0), include.drift=TRUE) ``` Should it raise on `trend="c"` or `trend="ct"` when d=1? > Should it raise on trend="c" or trend="ct" when d=1? I agree, raising in these cases is probably the best idea. Thank you all for your support and patience. You are invaluable. Hello all. I have run into a similar issue as the original post. I am very new to all of this, so forgive me if the solution is obvious and has been covered above. I was also using the original ARIMA model `old_model = ARIMA(ts_log, order = (2,1,2))` with the understanding that **d = 1** would basically difference **t** and **t - 1**. This worked really well, but when I used the new model `new_model = ARIMA(ts_log, order = (2,1,2))`, the results were vastly different. I tried to add in `trend = c` or `trend = ct` etc. but none of those seemed to work. Should I be using **ts_log_diff** as input to `new_model` instead? **ts_log_diff** is just a first order difference of the original series. I am happy to provide more details/data. Again, total newbie here, so please forgive me if this is already covered above. I was just unsure of the changes and why the old version worked but the new one doesn't. Thank you! Can you post a minimal example that shows the difference between the models? 
Below is the code: ``` from statsmodels.tsa.arima.model import ARIMA from statsmodels.tsa.arima_model import ARIMA as arima ts_log_diff = ts_log - ts_log.shift() old_model = arima(ts_log, order = (2,1,2)) new_model = ARIMA(ts_log, order =(2,1,2)) old_results = old_model.fit() new_results = new_model.fit() plt.plot(ts_log_diff) plt.plot(old_results.fittedvalues, color='green') plt.plot(new_results.fittedvalues, color='red') ``` The code given above shows both the old and new version. I will also include the plots generated by both (separately just for clarity): OLD_MODEL: <img width="416" alt="Screen Shot 2021-01-05 at 5 33 52 PM" src="https://user-images.githubusercontent.com/21435381/103711358-1c03b780-4f6c-11eb-9fd8-6c35484a8b0e.png"> NEW_MODEL: <img width="396" alt="Screen Shot 2021-01-05 at 5 30 37 PM" src="https://user-images.githubusercontent.com/21435381/103711393-30e04b00-4f6c-11eb-9bf9-a9e88eebe340.png"> Not sure why the results are so different since I am passing in the same data to both. Thanks again for all the help! Try trend="t" in the new model. On Tue, Jan 5, 2021, 23:40 Kausthub Poondi <[email protected]> wrote: > Below is the code: > > from statsmodels.tsa.arima.model import ARIMA > from statsmodels.tsa.arima_model import ARIMA as arima > > ts_log_diff = ts_log - ts_log.shift() > > old_model = arima(ts_log, order = (2,1,2)) > new_model = ARIMA(ts_log, order =(2,1,2)) > > old_results = old_model.fit() > new_results = new_model.fit() > > plt.plot(ts_log_diff) > plt.plot(old_results.fittedvalues, color='green') > plt.plot(new_results.fittedvalues, color='red') > > The code given above shows both the old and new version. 
I will also > include the plots generated by both (separately just for clarity): > > OLD_MODEL: > [image: Screen Shot 2021-01-05 at 5 33 52 PM] > <https://user-images.githubusercontent.com/21435381/103711358-1c03b780-4f6c-11eb-9fd8-6c35484a8b0e.png> > > NEW_MODEL: > [image: Screen Shot 2021-01-05 at 5 30 37 PM] > <https://user-images.githubusercontent.com/21435381/103711393-30e04b00-4f6c-11eb-9bf9-a9e88eebe340.png> > > Not sure why the results are so different since I am passing in the same > data to both. Thanks again for all the help! > > β€” > You are receiving this because you were assigned. > Reply to this email directly, view it on GitHub > <https://github.com/statsmodels/statsmodels/issues/7191#issuecomment-754967386>, > or unsubscribe > <https://github.com/notifications/unsubscribe-auth/ABKTSRL524TDZVE2ER5RWWLSYOPPHANCNFSM4UMCU35A> > . > ```new_model = ARIMA(ts_log, order =(2,1,2), trend = "t")``` With this, I still get the same plot as above (the one in red). The forecasts of the old model were by default in the differenced scale, which is why they look similar when plotted against `ts_log_diff`. The new model forecasts in the scale of the original data, so it should be compared to `ts_log`. To check that the models are producing similar results, you can do: ```python plt.plot(ts_log) plt.plot(old_results.predict(typ='levels'), color='green') plt.plot(new_results.fittedvalues, color='red') ``` **Note**: I made a small change to the previous code sample. Looks like they are producing pretty similar results, as seen below. One last thing, is there a way to get rid of the initial spike in the red plot? Not sure why it appears, but ```.fit(disp = -1)``` doesn't work anymore because it says ```disp``` isn't recognized. Thank you so so much for your help! Really appreciate it. 
<img width="397" alt="Screen Shot 2021-01-06 at 10 00 34 AM" src="https://user-images.githubusercontent.com/21435381/103790011-84e34200-4ff5-11eb-9a65-772184204589.png"> > One last thing, is there a way to get rid of the initial spike in the red plot? The solution is to just not plot it (which is essentially what the old model does). In the future we might want to change cases like this to set the first period to NaN. While the current output of 0 is not incorrect (it is the model's prediction before seeing any data), it is not particularly useful. Ah ok got it. I was able to just remove the first term with a ```.drop(0)``` and that did the trick. However, I noticed that even after I solved all these issues and call the ```.predict()``` function. ``` num_months_ahead = 20 total_months = product_df.shape[0] + num_months_ahead ##OLD old_predictions = old_results.predict(1, total_months) predictions_ARIMA_diff_cumsum = old_predictions.cumsum() predictions_ARIMA_log = pd.Series([ts_log.iloc[0] for i in range(0, total_months + 1)]) predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum, fill_value=0) predictions_ARIMA = np.exp(predictions_ARIMA_log) print(predictions_ARIMA) ##NEW new_predictions = new_results.predict(1, total_months) predictions_ARIMA_new = np.exp(new_predictions) print(predictions_ARIMA_new) ``` The first section uses the old model (which is based on a tutorial I followed). The second section uses the new model and since it isn't differencing anymore, I just basically recover the original values by exponentiating. They both predict meaningful values for in-sample predictions, but _only the old model makes sense for out of sample predictions_. The new model just eventually predicts the same value (or within a very small range). Everything else between the two looks pretty much the same now (in fact the newer model is even better than the old one in terms of accuracy!) Would you happen to know why this occurs? 
Glad to hear things are working better. For this specific issue, we'd probably need an example with data that we could replicate to see what's going on. Sure thing! So what is being passed into the model is ``` ts_log = np.log(ts) plt.plot(ts_log) test_stationarity(ts_log) ``` where ```ts``` is: ``` 0 10723 1 12818 2 15794 3 13689 4 12742 5 13261 6 12888 7 12300 8 12161 9 12482 10 12899 11 12389 12 14225 13 15800 14 15700 15 14700 16 13800 17 14300 18 14649 19 15598 20 14960 21 15152 22 15655 23 15650 24 16976 25 18228 ``` With the old model, when I ask it to predict 20 months ahead, the following is returned (the output of ```print(predictions_ARIMA)``` from above): ``` 0 10723.000000 1 10929.722897 2 11430.980098 3 11441.502969 4 10553.236469 . . . 24 16026.353775 25 16826.569731 26 17766.065388 27 17455.037459 28 17482.395567 29 18529.172392 30 18905.241692 31 18607.908678 32 19197.703624 33 20092.013451 34 20062.835356 35 20129.758870 36 21017.305525 37 21530.181996 38 21442.901850 39 21947.612715 40 22798.706006 41 22990.065845 42 23135.591426 43 23920.244224 44 24528.551695 45 24632.661054 46 25112.486049 ``` However, with the new model, when I ask it to predict 20 months ahead, the following is returned (the output of ```print(predictions_ARIMA_new)``` from above): ``` 1 10723.000075 2 13063.122445 3 14894.213399 4 12054.779638 . . . 24 15593.157414 25 17094.670068 26 17528.850149 27 16699.588033 28 16739.071521 29 16937.395044 30 16956.253895 31 16911.831179 32 16901.098899 33 16910.048401 34 16913.853066 35 16912.278589 36 16911.156961 37 16911.369620 38 16911.664617 39 16911.656240 40 16911.585397 41 16911.577328 42 16911.592911 43 16911.597026 44 16911.593930 45 16911.592520 46 16911.593050 ``` As you can see, with the new model, it stabilizes around 16911 and just stays there forever (even if I predict 100 months ahead, the issue is the same). The old model however makes a lot more sense and predictions as we would expect. 
Really appreciate the help thus far :)
"2021-01-08T04:47:10Z"
0.12
[ "statsmodels/tsa/arima/tests/test_model.py::test_cov_type_none", "statsmodels/tsa/arima/tests/test_model.py::test_yule_walker", "statsmodels/tsa/arima/tests/test_model.py::test_nonstationary_gls_error", "statsmodels/tsa/arima/tests/test_model.py::test_append_with_exog_and_trend", "statsmodels/tsa/arima/tests/test_model.py::test_invalid", "statsmodels/tsa/arima/tests/test_model.py::test_append", "statsmodels/tsa/arima/tests/test_model.py::test_low_memory", "statsmodels/tsa/arima/tests/test_model.py::test_statespace", "statsmodels/tsa/arima/tests/test_model.py::test_innovations", "statsmodels/tsa/arima/tests/test_model.py::test_clone", "statsmodels/tsa/arima/tests/test_model.py::test_innovations_mle", "statsmodels/tsa/arima/tests/test_model.py::test_append_with_exog", "statsmodels/tsa/arima/tests/test_model.py::test_hannan_rissanen", "statsmodels/tsa/arima/tests/test_model.py::test_burg", "statsmodels/tsa/arima/tests/test_model.py::test_default_trend", "statsmodels/tsa/arima/tests/test_model.py::test_append_with_exog_pandas" ]
[ "statsmodels/tsa/arima/tests/test_model.py::test_forecast", "statsmodels/tsa/arima/tests/test_model.py::test_forecast_with_exog", "statsmodels/tsa/arima/tests/test_model.py::test_constant_integrated_model_error" ]
Python
[]
[]
statsmodels/statsmodels
7,279
statsmodels__statsmodels-7279
[ "6670" ]
a058516c5f1517a03877cf77f158370c7d1a9d39
diff --git a/statsmodels/stats/contingency_tables.py b/statsmodels/stats/contingency_tables.py --- a/statsmodels/stats/contingency_tables.py +++ b/statsmodels/stats/contingency_tables.py @@ -939,7 +939,7 @@ def __init__(self, tables, shift_zeros=False): sp = tables.shape if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2): raise ValueError("If an ndarray, argument must be 2x2xn") - table = tables + table = tables * 1. # use atleast float dtype else: if any([np.asarray(x).shape != (2, 2) for x in tables]): m = "If `tables` is a list, all of its elements should be 2x2"
diff --git a/statsmodels/stats/tests/test_contingency_tables.py b/statsmodels/stats/tests/test_contingency_tables.py --- a/statsmodels/stats/tests/test_contingency_tables.py +++ b/statsmodels/stats/tests/test_contingency_tables.py @@ -2,11 +2,13 @@ Tests for contingency table analyses. """ +import os +import warnings + import numpy as np import statsmodels.stats.contingency_tables as ctab import pandas as pd from numpy.testing import assert_allclose, assert_equal -import os import statsmodels.api as sm cur_dir = os.path.dirname(os.path.abspath(__file__)) @@ -324,8 +326,9 @@ def test_cochranq(): class CheckStratifiedMixin(object): @classmethod - def initialize(cls, tables): - cls.rslt = ctab.StratifiedTable(tables) + def initialize(cls, tables, use_arr=False): + tables1 = tables if not use_arr else np.dstack(tables) + cls.rslt = ctab.StratifiedTable(tables1) cls.rslt_0 = ctab.StratifiedTable(tables, shift_zeros=True) tables_pandas = [pd.DataFrame(x) for x in tables] cls.rslt_pandas = ctab.StratifiedTable(tables_pandas) @@ -377,8 +380,10 @@ def test_equal_odds(self): def test_pandas(self): - assert_equal(self.rslt.summary().as_text(), - self.rslt_pandas.summary().as_text()) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + assert_equal(self.rslt.summary().as_text(), + self.rslt_pandas.summary().as_text()) def test_from_data(self): @@ -453,7 +458,8 @@ def setup_class(cls): tables[3] = np.array([[12, 3], [7, 5]]) tables[4] = np.array([[1, 0], [3, 2]]) - cls.initialize(tables) + # check array of int + cls.initialize(tables, use_arr=True) cls.oddsratio_pooled = 3.5912 cls.logodds_pooled = np.log(3.5912)
BUG/ENH: StratifiedTable.test_null_odds fails with integer data example from meta-analysis PR `st = smstats.StratifiedTable(ctables.astype(np.float64))` without converting to float, data is int64 and raises the following exception in summary and test_null_odds() test_equal_odds() works correctly with integers ``` st.summary() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-49-bf722b7cb8e1> in <module> ----> 1 st.summary() ...\statsmodels\stats\contingency_tables.py in summary(self, alpha, float_format, method) 1270 headers = ["Statistic", "P-value", ""] 1271 stubs = ["Test of OR=1", "Test constant OR"] -> 1272 rslt1 = self.test_null_odds() 1273 rslt2 = self.test_equal_odds() 1274 data = [[fmt(x) for x in [rslt1.statistic, rslt1.pvalue, ""]], ...\statsmodels\stats\contingency_tables.py in test_null_odds(self, correction) 1045 statistic = statistic**2 1046 denom = self._apb * self._apc * self._bpd * self._cpd -> 1047 denom /= (self._n**2 * (self._n - 1)) 1048 denom = np.sum(denom) 1049 statistic /= denom TypeError: No loop matching the specified signature and casting was found for ufunc true_divide ```
aside: the hypothesis test methods could have a better repr, Also it could use a generic Bunch instance, e.g. from stats testing tools (to which I added repr and str in my meta-analysis PR, AFAIR, not yet merged ) I get this in notebook ``` st.test_null_odds() <bunch containing results, print to see contents> ``` checking this again. The unit tests use list of int arrays which do not raise. If I provide tables as an np.array with dtype int, then it raises. The path for non arrays, forces float64: `table = np.dstack(tables).astype(np.float64)`
"2021-01-25T16:53:39Z"
0.12
[ "statsmodels/stats/tests/test_contingency_tables.py::TestStratified1::test_null_odds", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified1::test_equal_odds", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified1::test_oddsratio_pooled_confint", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified1::test_logodds_pooled", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified1::test_from_data", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified1::test_pandas", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified1::test_oddsratio_pooled", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified1::test_logodds_pooled_confint", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified3::test_from_data", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified3::test_logodds_pooled_confint", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified3::test_null_odds", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified3::test_oddsratio_pooled", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified3::test_pandas", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified3::test_oddsratio_pooled_confint", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified3::test_equal_odds", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified3::test_logodds_pooled", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified2::test_equal_odds", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified2::test_logodds_pooled_confint", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified2::test_from_data", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified2::test_oddsratio_pooled_confint", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified2::test_oddsratio_pooled", 
"statsmodels/stats/tests/test_contingency_tables.py::TestStratified2::test_logodds_pooled", "statsmodels/stats/tests/test_contingency_tables.py::test_from_data_2x2", "statsmodels/stats/tests/test_contingency_tables.py::test_local_odds", "statsmodels/stats/tests/test_contingency_tables.py::test_from_data_stratified", "statsmodels/stats/tests/test_contingency_tables.py::test_chi2_association", "statsmodels/stats/tests/test_contingency_tables.py::test_cumulative_odds", "statsmodels/stats/tests/test_contingency_tables.py::test_SquareTable_from_data", "statsmodels/stats/tests/test_contingency_tables.py::test_resids", "statsmodels/stats/tests/test_contingency_tables.py::test_ordinal_association", "statsmodels/stats/tests/test_contingency_tables.py::test_mcnemar", "statsmodels/stats/tests/test_contingency_tables.py::test_stratified_table_cube", "statsmodels/stats/tests/test_contingency_tables.py::test_SquareTable_nonsquare", "statsmodels/stats/tests/test_contingency_tables.py::test_homogeneity", "statsmodels/stats/tests/test_contingency_tables.py::test_cochranq", "statsmodels/stats/tests/test_contingency_tables.py::test_shifting", "statsmodels/stats/tests/test_contingency_tables.py::test_symmetry", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_log_oddsratio", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_riskratio", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_log_oddsratio_se", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_oddsratio_pvalue", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_oddsratio", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_oddsratio_confint", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_log_riskratio_se", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_log_riskratio_confint", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_log_riskratio", 
"statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_riskratio_pvalue", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_from_data", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_summary", "statsmodels/stats/tests/test_contingency_tables.py::Test2x2_1::test_riskratio_confint" ]
[ "statsmodels/stats/tests/test_contingency_tables.py::TestStratified2::test_pandas", "statsmodels/stats/tests/test_contingency_tables.py::TestStratified2::test_null_odds" ]
Python
[]
[]
statsmodels/statsmodels
7,380
statsmodels__statsmodels-7380
[ "7377" ]
48bb953ff16f0230a84e24d8bcc1a1e1b82ed5a0
diff --git a/statsmodels/base/data.py b/statsmodels/base/data.py --- a/statsmodels/base/data.py +++ b/statsmodels/base/data.py @@ -578,10 +578,12 @@ def attach_rows(self, result): squeezed = squeezed[None, :] # May be zero-dim, for example in the case of forecast one step in tsa if squeezed.ndim < 2: - return Series(squeezed, index=self.row_labels[-len(result):]) + out = Series(squeezed) else: - return DataFrame(result, index=self.row_labels[-len(result):], - columns=self.ynames) + out = DataFrame(result) + out.columns = self.ynames + out.index = self.row_labels[-len(result):] + return out def attach_dates(self, result): squeezed = result.squeeze()
diff --git a/statsmodels/tsa/tests/test_ar.py b/statsmodels/tsa/tests/test_ar.py --- a/statsmodels/tsa/tests/test_ar.py +++ b/statsmodels/tsa/tests/test_ar.py @@ -14,6 +14,7 @@ import statsmodels.api as sm from statsmodels.iolib.summary import Summary from statsmodels.regression.linear_model import OLS +from statsmodels.tools.sm_exceptions import ValueWarning from statsmodels.tools.testing import assert_equal from statsmodels.tools.tools import Bunch from statsmodels.tsa.ar_model import AR, AutoReg, ar_select_order @@ -1553,3 +1554,18 @@ def test_autoreg_plot_err(): res = mod.fit() with pytest.raises(ValueError): res.plot_predict(0, end=50, in_sample=False) + + +def test_autoreg_resids(): + idx = pd.date_range("1900-01-01", periods=250, freq="M") + rs = np.random.RandomState(0) + idx_dates = sorted(rs.choice(idx, size=100, replace=False)) + e = rs.standard_normal(250) + y = np.zeros(250) + y[:2] = e[:2] + for i in range(2, 250): + y[i] = 2 + 1.8 * y[i - 1] - 0.95 * y[i - 2] + e[i] + ys = pd.Series(y[-100:], index=idx_dates, name="y") + with pytest.warns(ValueWarning): + res = AutoReg(ys, lags=2, old_names=False).fit() + assert np.all(np.isfinite(res.resid))
BUG: `attach_rows` does not work when Series change index #### Describe the bug When you set the index in a Series constructor with different values it looks up rather than assigns the series. See https://stackoverflow.com/questions/66523923/how-to-get-residuals-from-statsmodels-autoregresults-model-resid-returns-all-na/66578922?noredirect=1#comment117752130_66578922 #### Code Sample, a copy-pastable example if possible See https://stackoverflow.com/questions/66523923/how-to-get-residuals-from-statsmodels-autoregresults-model-resid-returns-all-na/66578922?noredirect=1#comment117752130_66578922 <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `master`. If your problem has been fixed in an unreleased version, you might be able to use `master` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the master branch of this repository? It helps the limited resources if we know problems exist in the current master so that they do not need to check whether the code sample produces a bug in the next release. </details> If the issue has not been resolved, please file it in the issue tracker. #### Expected Output Not all `nan` values. #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> [paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line] </details>
"2021-03-13T23:56:36Z"
0.12
[ "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_pvalues", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_bse", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_fpe", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_params", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_summary", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_llf", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_pickle", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_predict", "statsmodels/tsa/tests/test_ar.py::TestAROLSNoConstant::test_fpe", "statsmodels/tsa/tests/test_ar.py::TestAROLSNoConstant::test_predict", "statsmodels/tsa/tests/test_ar.py::TestAROLSNoConstant::test_params", "statsmodels/tsa/tests/test_ar.py::TestAROLSNoConstant::test_bse", "statsmodels/tsa/tests/test_ar.py::TestAROLSNoConstant::test_pickle", "statsmodels/tsa/tests/test_ar.py::TestAROLSNoConstant::test_llf", "statsmodels/tsa/tests/test_ar.py::TestAROLSNoConstant::test_mle", "statsmodels/tsa/tests/test_ar.py::TestAROLSNoConstant::test_summary", "statsmodels/tsa/tests/test_ar.py::TestAROLSNoConstant::test_pvalues", "statsmodels/tsa/tests/test_ar.py::TestAutolagAR::test_ic", "statsmodels/tsa/tests/test_ar.py::TestAROLSConstant::test_pickle", "statsmodels/tsa/tests/test_ar.py::TestAROLSConstant::test_summary", "statsmodels/tsa/tests/test_ar.py::TestAROLSConstant::test_predict", "statsmodels/tsa/tests/test_ar.py::TestAROLSConstant::test_fpe", "statsmodels/tsa/tests/test_ar.py::TestAROLSConstant::test_bse", "statsmodels/tsa/tests/test_ar.py::TestAROLSConstant::test_pvalues", "statsmodels/tsa/tests/test_ar.py::TestAROLSConstant::test_params", "statsmodels/tsa/tests/test_ar.py::TestAROLSConstant::test_llf", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_params", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_summary", 
"statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_predict", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_bse", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_pickle", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_fpe", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_llf", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_pvalues", "statsmodels/tsa/tests/test_ar.py::TestARMLEConstant::test_predict", "statsmodels/tsa/tests/test_ar.py::TestARMLEConstant::test_dynamic_predict", "statsmodels/tsa/tests/test_ar.py::test_equiv_ols_autoreg[AR:", "statsmodels/tsa/tests/test_ar.py::test_f_test_ols_autoreg[AR:", "statsmodels/tsa/tests/test_ar.py::test_other_tests_autoreg[AR:", "statsmodels/tsa/tests/test_ar.py::test_conf_int_ols_autoreg[AR:", "statsmodels/tsa/tests/test_ar.py::test_autoreg_predict_smoke[lags:", "statsmodels/tsa/tests/test_ar.py::test_dynamic_forecast_smoke[lags:", "statsmodels/tsa/tests/test_ar.py::test_autoreg_smoke_plots[lags:", "statsmodels/tsa/tests/test_ar.py::test_constant_column_trend", "statsmodels/tsa/tests/test_ar.py::test_ar_score", "statsmodels/tsa/tests/test_ar.py::test_spec_errors", "statsmodels/tsa/tests/test_ar.py::test_ar_predict_no_fit", "statsmodels/tsa/tests/test_ar.py::test_ar_select_order_tstat", "statsmodels/tsa/tests/test_ar.py::test_roots", "statsmodels/tsa/tests/test_ar.py::test_ar_errors", "statsmodels/tsa/tests/test_ar.py::test_at_repeated_fit", "statsmodels/tsa/tests/test_ar.py::test_ar_repeated_fit", "statsmodels/tsa/tests/test_ar.py::test_ar_dates", "statsmodels/tsa/tests/test_ar.py::test_predict_errors", "statsmodels/tsa/tests/test_ar.py::test_summary_corner", "statsmodels/tsa/tests/test_ar.py::test_parameterless_autoreg", "statsmodels/tsa/tests/test_ar.py::test_ar_select_order", "statsmodels/tsa/tests/test_ar.py::test_ar_select_order_smoke", "statsmodels/tsa/tests/test_ar.py::test_bestlag_stop", 
"statsmodels/tsa/tests/test_ar.py::test_resids_mle", "statsmodels/tsa/tests/test_ar.py::test_ar_named_series", "statsmodels/tsa/tests/test_ar.py::test_ar_series", "statsmodels/tsa/tests/test_ar.py::test_ar_start_params", "statsmodels/tsa/tests/test_ar.py::test_forecast_start_end_equiv[True]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag9]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag0]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_plot_err", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag15]", "statsmodels/tsa/tests/test_ar.py::test_equiv_dynamic", "statsmodels/tsa/tests/test_ar.py::test_autoreg_series", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag8]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_start[21]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag2]", "statsmodels/tsa/tests/test_ar.py::test_forecast_start_end_equiv[False]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag3]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_roots", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag12]", "statsmodels/tsa/tests/test_ar.py::test_predict_exog", "statsmodels/tsa/tests/test_ar.py::test_predict_seasonal", "statsmodels/tsa/tests/test_ar.py::test_autoreg_score", "statsmodels/tsa/tests/test_ar.py::test_deterministic", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag6]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag10]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag7]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_named_series[False]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag4]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag14]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_start[25]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag11]", 
"statsmodels/tsa/tests/test_ar.py::test_autoreg_predict_forecast_equiv", "statsmodels/tsa/tests/test_ar.py::test_predict_irregular_ar", "statsmodels/tsa/tests/test_ar.py::test_autoreg_constant_column_trend", "statsmodels/tsa/tests/test_ar.py::test_autoreg_forecast_period_index", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag13]", "statsmodels/tsa/tests/test_ar.py::test_dynamic_against_sarimax", "statsmodels/tsa/tests/test_ar.py::test_autoreg_summary_corner[False]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag1]", "statsmodels/tsa/tests/test_ar.py::test_ar_order_select", "statsmodels/tsa/tests/test_ar.py::test_autoreg_named_series[True]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_summary_corner[True]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag5]" ]
[ "statsmodels/tsa/tests/test_ar.py::test_autoreg_resids" ]
Python
[]
[]
statsmodels/statsmodels
7,383
statsmodels__statsmodels-7383
[ "7309" ]
7d707cfc27a56c8449fdb47b6f37a9fc8b360ea1
diff --git a/statsmodels/tsa/exponential_smoothing/base.py b/statsmodels/tsa/exponential_smoothing/base.py --- a/statsmodels/tsa/exponential_smoothing/base.py +++ b/statsmodels/tsa/exponential_smoothing/base.py @@ -206,7 +206,10 @@ def _wrap_data(self, data, start_idx, end_idx, names=None): if data.ndim > 1 and data.shape[1] == 1: data = np.squeeze(data, axis=1) if self.use_pandas: - _, _, _, index = self._get_prediction_index(start_idx, end_idx) + if data.shape[0]: + _, _, _, index = self._get_prediction_index(start_idx, end_idx) + else: + index = None if data.ndim < 2: data = pd.Series(data, index=index, name=names) else: diff --git a/statsmodels/tsa/exponential_smoothing/ets.py b/statsmodels/tsa/exponential_smoothing/ets.py --- a/statsmodels/tsa/exponential_smoothing/ets.py +++ b/statsmodels/tsa/exponential_smoothing/ets.py @@ -1757,7 +1757,6 @@ def simulate( principles and practice*, 2nd edition, OTexts: Melbourne, Australia. OTexts.com/fpp2. Accessed on February 28th 2020. """ - # Get the starting location start_idx = self._get_prediction_start_index(anchor) @@ -2286,20 +2285,20 @@ def __init__( ) # anchor anchor = start_smooth + i - sim_results.append( - results.simulate( - ndynamic, - anchor=anchor_dynamic, - repetitions=simulate_repetitions, - **simulate_kwargs, + if ndynamic: + sim_results.append( + results.simulate( + ndynamic, + anchor=anchor_dynamic, + repetitions=simulate_repetitions, + **simulate_kwargs, + ) ) - ) - self.simulation_results = np.concatenate(sim_results, axis=0) - # if self.use_pandas: - # self.simulation_results = pd.DataFrame( - # self.simulation_results, index=self.row_labels, - # columns=sim_results[0].columns - # ) + if sim_results and isinstance(sim_results[0], pd.DataFrame): + self.simulation_results = pd.concat(sim_results, 0) + else: + self.simulation_results = np.concatenate(sim_results, axis=0) + self.forecast_variance = self.simulation_results.var(1) else: # method == 'exact' steps = np.ones(ndynamic + nsmooth) if ndynamic > 
0: diff --git a/statsmodels/tsa/forecasting/stl.py b/statsmodels/tsa/forecasting/stl.py --- a/statsmodels/tsa/forecasting/stl.py +++ b/statsmodels/tsa/forecasting/stl.py @@ -498,8 +498,22 @@ def get_prediction( pred = self._model_result.get_prediction( start=start, end=end, dynamic=dynamic, **kwargs ) - seasonal_prediction = self._get_seasonal_prediction(start, end, dynamic) + seasonal_prediction = self._get_seasonal_prediction( + start, end, dynamic + ) mean = pred.predicted_mean + seasonal_prediction + try: + var_pred_mean = pred.var_pred_mean + except (AttributeError, NotImplementedError): + # Allow models that do not return var_pred_mean + import warnings + + warnings.warn( + f"The variance of the predicted mean is not available using " + f"the {self.model.__class__.__name__} model class.", + UserWarning, + ) + var_pred_mean = np.nan + mean.copy() return PredictionResults( - mean, pred.var_pred_mean, dist="norm", row_labels=pred.row_labels + mean, var_pred_mean, dist="norm", row_labels=pred.row_labels )
diff --git a/statsmodels/tsa/forecasting/tests/test_stl.py b/statsmodels/tsa/forecasting/tests/test_stl.py --- a/statsmodels/tsa/forecasting/tests/test_stl.py +++ b/statsmodels/tsa/forecasting/tests/test_stl.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from statsmodels.datasets import sunspots +import statsmodels.datasets from statsmodels.tsa.ar_model import AutoReg from statsmodels.tsa.arima.model import ARIMA from statsmodels.tsa.base.prediction import PredictionResults @@ -133,15 +133,58 @@ def fit(self, *args, **kwargs): STLForecast(data, FakeModelSummary).fit().summary() -def test_get_prediction(): - # GH7309 - df = sunspots.load_pandas().data [email protected](scope="function") +def sunspots(): + df = statsmodels.datasets.sunspots.load_pandas().data df.index = np.arange(df.shape[0]) - y = df.iloc[:, 0] + return df.iloc[:, 0] + + +def test_get_prediction(sunspots): + # GH7309 stlf_model = STLForecast( - y, model=ARIMA, model_kwargs={"order": (2, 2, 0)}, period=11 + sunspots, model=ARIMA, model_kwargs={"order": (2, 2, 0)}, period=11 ) stlf_res = stlf_model.fit() pred = stlf_res.get_prediction() assert pred.predicted_mean.shape == (309,) assert pred.var_pred_mean.shape == (309,) + + [email protected]("not_implemented", [True, False]) +def test_no_var_pred(sunspots, not_implemented): + class DummyPred: + def __init__(self, predicted_mean, row_labels): + self.predicted_mean = predicted_mean + self.row_labels = row_labels + + def f(): + raise NotImplementedError + + if not_implemented: + self.forecast = property(f) + + class DummyRes: + def __init__(self, res): + self._res = res + + def forecast(self, *args, **kwargs): + return self._res.forecast(*args, **kwargs) + + def get_prediction(self, *args, **kwargs): + pred = self._res.get_prediction(*args, **kwargs) + + return DummyPred(pred.predicted_mean, pred.row_labels) + + class DummyMod: + def __init__(self, y): + self._mod = ARIMA(y) + + def fit(self, *args, **kwargs): + res = self._mod.fit(*args, 
**kwargs) + return DummyRes(res) + + stl_mod = STLForecast(sunspots, model=DummyMod, period=11) + stl_res = stl_mod.fit() + pred = stl_res.get_prediction() + assert np.all(np.isnan(pred.var_pred_mean))
STLForecastResults.get_prediction() with ETSModel as model fails when "trend"="mul" in the model kwargs I get an error when calling the get_prediction() method on a STLForecastResults object that is the output of STLForecast(endog=data, model=ETSModel, model_kwargs={'trend': 'mul'}).fit() https://github.com/statsmodels/statsmodels/blob/ecd6ecaf09e4864af156bd549002700e84097377/statsmodels/tsa/exponential_smoothing/ets.py#L2290 `self.forecast_variance` is not defined when `self.method == "simulated"` but `get_prediction()` calls `pred.var_pred_mean` that uses `self.forecast_variance` https://github.com/statsmodels/statsmodels/blob/ecd6ecaf09e4864af156bd549002700e84097377/statsmodels/tsa/forecasting/stl.py#L504
Stlf_model = STLForecast( ts, model =ARIMA, model_kwargs ={ 'order': (2,1,0)}, period =52) Stlf_model = Stlf_model.fit() Stlf_model.get_prediction() gives the following error ' Data must be 1-dimensional' However, Stlf_model.forecast() runs fine. But I need to get the corresponding confidence interval of the forecasted values Can you provide a complete, copy-pastable example that reproduces? Hi, I am using the following code (data attached) import pandas as pd from statsmodels.tsa.forecasting.stl import STLForecast from statsmodels.tsa.arima.model import ARIMA ts = pd. read_csv('../ts_example.csv') ts = ts.set_index('Date') ts = ts.resample('W-SUN').interpolate(method='linear') ts = ts.sort_index() stlf_model = STLForecast(endog=ts,model = ARIMA,model_kwargs= { 'order':(2,1,0)}, period = 52) stlf_model = stlf_model.fit() forecast = stlf_model.forecast(26) stlf_model.get_prediction() β€”- throws an error [ts_example.zip](https://github.com/statsmodels/statsmodels/files/6076016/ts_example.zip)
"2021-03-17T15:35:35Z"
0.12
[ "statsmodels/tsa/forecasting/tests/test_stl.py::test_equivalence_forecast[23-ETSModel]", "statsmodels/tsa/forecasting/tests/test_stl.py::test_equivalence_forecast[7-ETSModel]", "statsmodels/tsa/forecasting/tests/test_stl.py::test_smoke", "statsmodels/tsa/forecasting/tests/test_stl.py::test_equivalence_forecast[1-ETSModel]", "statsmodels/tsa/forecasting/tests/test_stl.py::test_get_prediction", "statsmodels/tsa/forecasting/tests/test_stl.py::test_exceptions" ]
[ "statsmodels/tsa/forecasting/tests/test_stl.py::test_no_var_pred[True]", "statsmodels/tsa/forecasting/tests/test_stl.py::test_no_var_pred[False]" ]
Python
[]
[]
statsmodels/statsmodels
7,413
statsmodels__statsmodels-7413
[ "7394" ]
26dbb94299e5c12221b157ee5837691b6d24a755
diff --git a/statsmodels/graphics/gofplots.py b/statsmodels/graphics/gofplots.py --- a/statsmodels/graphics/gofplots.py +++ b/statsmodels/graphics/gofplots.py @@ -392,6 +392,7 @@ def qqplot( line=None, other=None, ax=None, + swap: bool = False, **plotkwargs, ): """ @@ -432,6 +433,8 @@ def qqplot( ax : AxesSubplot, optional If given, this subplot is used to plot in instead of a new figure being created. + swap : bool, optional + Flag indicating to swap the x and y labels. **plotkwargs Additional arguments to be passed to the `plot` command. @@ -459,7 +462,6 @@ def qqplot( # the larger data set p = plotting_pos(self.nobs, self.a) s_other = stats.mstats.mquantiles(s_other, p) - fig, ax = _do_plot( s_other, s_self, self.dist, ax=ax, line=line, **plotkwargs ) @@ -468,6 +470,8 @@ def qqplot( xlabel = "Quantiles of 2nd Sample" if ylabel is None: ylabel = "Quantiles of 1st Sample" + if swap: + xlabel, ylabel = ylabel, xlabel else: fig, ax = _do_plot( @@ -770,11 +774,16 @@ def qqplot_2samples( data2 = ProbPlot(data2) if data2.data.shape[0] >= data1.data.shape[0]: fig = data1.qqplot( - xlabel=xlabel, ylabel=ylabel, line=line, other=data2, ax=ax + xlabel=ylabel, ylabel=xlabel, line=line, other=data2, ax=ax ) else: fig = data2.qqplot( - xlabel=ylabel, ylabel=xlabel, line=line, other=data1, ax=ax + xlabel=ylabel, + ylabel=xlabel, + line=line, + other=data1, + ax=ax, + swap=True, ) return fig
diff --git a/statsmodels/graphics/tests/test_gofplots.py b/statsmodels/graphics/tests/test_gofplots.py --- a/statsmodels/graphics/tests/test_gofplots.py +++ b/statsmodels/graphics/tests/test_gofplots.py @@ -423,19 +423,31 @@ def test_with_ax(self, close_figures): @pytest.mark.matplotlib def test_plot_full_options(self, close_figures): gofplots._do_plot( - self.x, self.y, ax=self.ax, step=False, **self.full_options, + self.x, + self.y, + ax=self.ax, + step=False, + **self.full_options, ) @pytest.mark.matplotlib def test_step_baseline(self, close_figures): gofplots._do_plot( - self.x, self.y, ax=self.ax, step=True, **self.step_options, + self.x, + self.y, + ax=self.ax, + step=True, + **self.step_options, ) @pytest.mark.matplotlib def test_step_full_options(self, close_figures): gofplots._do_plot( - self.x, self.y, ax=self.ax, step=True, **self.full_options, + self.x, + self.y, + ax=self.ax, + step=True, + **self.full_options, ) @pytest.mark.matplotlib @@ -616,3 +628,34 @@ def test_param_unpacking(): assert_equal(pp.fit_params, expected) pp = ProbPlot(np.empty(100), stats.beta(a=2, b=3, loc=4, scale=5)) assert_equal(pp.fit_params, expected) + + [email protected] [email protected]("labels", [{}, {"xlabel": "X", "ylabel": "Y"}]) [email protected]("x_size", [30, 50]) [email protected]("y_size", [30, 50]) [email protected]("line", [None, "45", "s", "r", "q"]) +def test_correct_labels(reset_randomstate, line, x_size, y_size, labels): + rs = np.random.RandomState(9876554) + x = rs.normal(loc=0, scale=0.1, size=x_size) + y = rs.standard_t(3, size=y_size) + pp_x = sm.ProbPlot(x) + pp_y = sm.ProbPlot(y) + fig = qqplot_2samples(pp_x, pp_y, line=line, **labels) + ax = fig.get_axes()[0] + x_label = ax.get_xlabel() + y_label = ax.get_ylabel() + if x_size <= y_size: + if not labels: + assert "2nd" in x_label + assert "1st" in y_label + else: + assert "Y" in x_label + assert "X" in y_label + else: + if not labels: + assert "1st" in x_label + assert "2nd" in y_label + else: + assert 
"X" in x_label + assert "Y" in y_label
BUG: fix wrong axis label in qqplot_2samples The label location is wrong in qqplot_2samples. When data1 and data2 are of equal sizes, x_label will correspond to data2, which is wrong, (it should correspond to data1). BTW, why permitting different data sizes here? To me, it doesn't make sense for a Q-Q plot to have different sample sizes on data1 and data2. Not sure how to add a test for plot, but this code can produce results needed: d1 = np.arange(3) d2 = np.arange(0,30,10) pp_x = sm.ProbPlot(d1) pp_y = sm.ProbPlot(d2) qqplot_2samples(pp_x, pp_y,xlabel="d1") new_qqplot_2samples(pp_x, pp_y,xlabel="d1") - [ ] closes #xxxx - [x] tests added / passed. - [ ] code/documentation is well formatted. - [ ] properly formatted commit message. See [NumPy's guide](https://docs.scipy.org/doc/numpy-1.15.1/dev/gitwash/development_workflow.html#writing-the-commit-message). <details> **Notes**: * It is essential that you add a test when making code changes. Tests are not needed for doc changes. * When adding a new function, test values should usually be verified in another package (e.g., R/SAS/Stata). * When fixing a bug, you must add a test that would produce the bug in main and then show that it is fixed with the new code. * New code additions must be well formatted. Changes should pass flake8. If on Linux or OSX, you can verify you changes are well formatted by running ``` git diff upstream/main -u -- "*.py" | flake8 --diff --isolated ``` assuming `flake8` is installed. This command is also available on Windows using the Windows System for Linux once `flake8` is installed in the local Linux environment. While passing this test is not required, it is good practice and it help improve code quality in `statsmodels`. * Docstring additions must render correctly, including escapes and LaTeX. </details>
"2021-04-12T12:47:08Z"
0.12
[ "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_r_fmt", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_r_fmt_lineoptions", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_q", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_45_fmt_lineoptions", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_non45_no_y", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_badline", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_45_fmt", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_45", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_s_fmt_lineoptions", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_q_fmt_lineoptions", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_q_fmt", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_r", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_s", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_s_fmt", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_non45_no_x", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_non45_no_x_no_y", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_probplot", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot_other_prbplt", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestTopLevel::test_qqplot_2samples_arrays", "statsmodels/graphics/tests/test_gofplots.py::TestTopLevel::test_qqplot_2samples_prob_plot_objects", "statsmodels/graphics/tests/test_gofplots.py::TestTopLevel::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestTopLevel::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_cunnane", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_piecewise", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_weibull", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_approx_med_unbiased", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_lininterp", "statsmodels/graphics/tests/test_gofplots.py::TestCheckDist::test_bad", "statsmodels/graphics/tests/test_gofplots.py::TestCheckDist::test_good", "statsmodels/graphics/tests/test_gofplots.py::test_qqplot_unequal", "statsmodels/graphics/tests/test_gofplots.py::test_invalid_dist_config", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-30-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-30-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_param_unpacking", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-30-30-labels0]", 
"statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-30-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-30-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_fit_params", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_loc_set_in_dist", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_scale_set", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_loc_set", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot_other_array", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_scale_set_in_dist", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot_other_array", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_scale_set", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_loc_set", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_exceptions", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestCompareSamplesDifferentSize::test_ppplot", 
"statsmodels/graphics/tests/test_gofplots.py::TestCompareSamplesDifferentSize::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_baseline", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_step_qq_line", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_step_baseline", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_with_ax", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_plot_qq_line", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_plot_full_options", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_step_full_options" ]
[ "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-50-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-30-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-50-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-30-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-30-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-30-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-30-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-30-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-50-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-50-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-50-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-30-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-30-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-30-30-labels1]", 
"statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-30-50-labels1]" ]
Python
[]
[]
statsmodels/statsmodels
7,508
statsmodels__statsmodels-7508
[ "7507" ]
6a49f1cc12aa6db9d35f84cca1a2df530d47ecb7
diff --git a/statsmodels/tsa/exponential_smoothing/base.py b/statsmodels/tsa/exponential_smoothing/base.py --- a/statsmodels/tsa/exponential_smoothing/base.py +++ b/statsmodels/tsa/exponential_smoothing/base.py @@ -103,7 +103,10 @@ def fix_params(self, params): cache_free_params_index = self._free_params_index # Validate parameter names and values - self._validate_can_fix_params(set(params.keys())) + all_fixed_param_names = ( + set(params.keys()) | set(self._fixed_params.keys()) + ) + self._validate_can_fix_params(all_fixed_param_names) # Set the new fixed parameters, keeping the order as given by # param_names diff --git a/statsmodels/tsa/statespace/mlemodel.py b/statsmodels/tsa/statespace/mlemodel.py --- a/statsmodels/tsa/statespace/mlemodel.py +++ b/statsmodels/tsa/statespace/mlemodel.py @@ -492,7 +492,10 @@ def fix_params(self, params): cache_free_params_index = self._free_params_index # Validate parameter names and values - self._validate_can_fix_params(set(params.keys())) + all_fixed_param_names = ( + set(params.keys()) | set(self._fixed_params.keys()) + ) + self._validate_can_fix_params(all_fixed_param_names) # Set the new fixed parameters, keeping the order as given by # param_names
diff --git a/statsmodels/tsa/statespace/tests/test_fixed_params.py b/statsmodels/tsa/statespace/tests/test_fixed_params.py --- a/statsmodels/tsa/statespace/tests/test_fixed_params.py +++ b/statsmodels/tsa/statespace/tests/test_fixed_params.py @@ -32,6 +32,21 @@ def test_fix_params(): assert_equal(mod._free_params_index, None) +def test_nested_fix_params(): + mod = mlemodel.MLEModel([], 1) + mod._param_names = ['a', 'b', 'c'] + with mod.fix_params({'a': 2, 'b': 0}): + with mod.fix_params({'b': 1.}): + assert_(mod._has_fixed_params) + assert_equal(mod._fixed_params, {'a': 2, 'b': 1.}) + assert_equal(mod._fixed_params_index, [0, 1]) + assert_equal(mod._free_params_index, [2]) + assert_(not mod._has_fixed_params) + assert_equal(mod._fixed_params, {}) + assert_equal(mod._fixed_params_index, None) + assert_equal(mod._free_params_index, None) + + def test_results_append(): endog = macrodata['infl'] endog1 = endog.iloc[:100] @@ -117,7 +132,17 @@ def test_results_apply(): assert_allclose(res2_fit.llf_obs, res_fit.llf_obs) -def test_sarimax_invalid(): +def test_mle_validate(): + mod = mlemodel.MLEModel([], 1) + mod._param_names = ['a', 'b', 'c'] + msg = 'Invalid parameter name passed: "d"' + + with pytest.raises(ValueError, match=msg): + with mod.fix_params({'d': 1}): + pass + + +def test_sarimax_validate(): # Test for invalid uses of parameter fixing endog = macrodata['infl'] mod1 = sarimax.SARIMAX(endog, order=(2, 0, 0)) @@ -139,6 +164,7 @@ def test_sarimax_invalid(): assert_equal(mod1._fixed_params, {'ar.L1': 0.5, 'ar.L2': 0.2}) assert_equal(mod1._fixed_params_index, [0, 1]) assert_equal(mod1._free_params_index, [2]) + res = mod1.fit_constrained({'ar.L1': 0.5, 'ar.L2': 0.2}, start_params=[7.0], disp=False) assert_(res._has_fixed_params) @@ -146,11 +172,19 @@ def test_sarimax_invalid(): assert_equal(res._fixed_params_index, [0, 1]) assert_equal(res._free_params_index, [2]) + with mod1.fix_params({'ar.L1': 0.5, 'ar.L2': 0.}): + # overwrite ar.L2 with nested fix_params + 
with mod1.fix_params({'ar.L2': 0.2}): + assert_(mod1._has_fixed_params) + assert_equal(mod1._fixed_params, {'ar.L1': 0.5, 'ar.L2': 0.2}) + assert_equal(mod1._fixed_params_index, [0, 1]) + assert_equal(mod1._free_params_index, [2]) -def test_structural_invalid(): + +def test_structural_validate(): # Test for invalid uses of parameter fixing endog = macrodata['infl'] - mod1 = structural.UnobservedComponents(endog, 'rwalk', ar=2) + mod1 = structural.UnobservedComponents(endog, 'rwalk', autoregressive=2) # Try to fix invalid parameter assert_raises(ValueError, mod1.fit_constrained, {'AR.L1': 0.5}) @@ -161,8 +195,31 @@ def test_structural_invalid(): pass assert_raises(ValueError, mod1.fit_constrained, {'ar.L1': 0.5}) + # But can fix the entire set of parameters that are part of a multivariate + # transformation + with mod1.fix_params({'ar.L1': 0.5, 'ar.L2': 0.2}): + assert_(mod1._has_fixed_params) + assert_equal(mod1._fixed_params, {'ar.L1': 0.5, 'ar.L2': 0.2}) + assert_equal(mod1._fixed_params_index, [2, 3]) + assert_equal(mod1._free_params_index, [0, 1]) + + res = mod1.fit_constrained({'ar.L1': 0.5, 'ar.L2': 0.2}, + start_params=[7.0], disp=False) + assert_(res._has_fixed_params) + assert_equal(res._fixed_params, {'ar.L1': 0.5, 'ar.L2': 0.2}) + assert_equal(res._fixed_params_index, [2, 3]) + assert_equal(res._free_params_index, [0, 1]) + + with mod1.fix_params({'ar.L1': 0.5, 'ar.L2': 0.}): + # overwrite ar.L2 with nested fix_params + with mod1.fix_params({'ar.L2': 0.2}): + assert_(mod1._has_fixed_params) + assert_equal(mod1._fixed_params, {'ar.L1': 0.5, 'ar.L2': 0.2}) + assert_equal(mod1._fixed_params_index, [2, 3]) + assert_equal(mod1._free_params_index, [0, 1]) + -def test_dynamic_factor_invalid(): +def test_dynamic_factor_validate(): # Test for invalid uses of parameter fixing endog = np.log(macrodata[['cpi', 'realgdp', 'realinv']]).diff().iloc[1:] endog = (endog - endog.mean()) / endog.std() @@ -200,24 +257,37 @@ def test_dynamic_factor_invalid(): with 
pytest.raises(ValueError): with mod2.fix_params({'L1.f1.f1': 0.5}): pass + constraints = {'L1.f1.f1': 0.3, 'L2.f1.f1': 0.1} with mod2.fix_params(constraints): assert_(mod2._has_fixed_params) assert_equal(mod2._fixed_params, constraints) assert_equal(mod2._fixed_params_index, [6, 7]) assert_equal(mod2._free_params_index, [0, 1, 2, 3, 4, 5]) + res2 = mod2.fit_constrained(constraints, disp=False) assert_(res2._has_fixed_params) assert_equal(res2._fixed_params, constraints) assert_equal(res2._fixed_params_index, [6, 7]) assert_equal(res2._free_params_index, [0, 1, 2, 3, 4, 5]) + with mod2.fix_params(constraints): + # overwrite L1.f1.f1 with nested fix_params + with mod2.fix_params({'L1.f1.f1': -0.3}): + assert_(mod2._has_fixed_params) + assert_equal( + mod2._fixed_params, {'L1.f1.f1': -0.3, 'L2.f1.f1': 0.1} + ) + assert_equal(mod2._fixed_params_index, [6, 7]) + assert_equal(mod2._free_params_index, [0, 1, 2, 3, 4, 5]) + # (same as previous, now k_factors=2) mod3 = dynamic_factor.DynamicFactor( endog, k_factors=2, factor_order=1, error_cov_type='diagonal') with pytest.raises(ValueError): with mod3.fix_params({'L1.f1.f1': 0.3}): pass + constraints = dict([('L1.f1.f1', 0.3), ('L1.f2.f1', 0.1), ('L1.f1.f2', -0.05), ('L1.f2.f2', 0.1)]) with mod3.fix_params(constraints): @@ -225,12 +295,25 @@ def test_dynamic_factor_invalid(): assert_equal(mod3._fixed_params, constraints) assert_equal(mod3._fixed_params_index, [9, 10, 11, 12]) assert_equal(mod3._free_params_index, [0, 1, 2, 3, 4, 5, 6, 7, 8]) + res3 = mod3.fit_constrained(constraints, disp=False) assert_(res3._has_fixed_params) assert_equal(res3._fixed_params, constraints) assert_equal(res3._fixed_params_index, [9, 10, 11, 12]) assert_equal(res3._free_params_index, [0, 1, 2, 3, 4, 5, 6, 7, 8]) + with mod3.fix_params(constraints): + # overwrite L1.f1.f1 and L1.f2.f2 with nested fix_params + with mod3.fix_params({'L1.f1.f1': -0.3, 'L1.f2.f2': -0.1}): + assert_(mod3._has_fixed_params) + assert_equal( + mod3._fixed_params, + 
dict([('L1.f1.f1', -0.3), ('L1.f2.f1', 0.1), + ('L1.f1.f2', -0.05), ('L1.f2.f2', -0.1)]) + ) + assert_equal(mod3._fixed_params_index, [9, 10, 11, 12]) + assert_equal(mod3._free_params_index, [0, 1, 2, 3, 4, 5, 6, 7, 8]) + # Now, with enforce_stationarity=False, we can fix any of the factor AR # coefficients mod4 = dynamic_factor.DynamicFactor( @@ -286,7 +369,7 @@ def test_dynamic_factor_invalid(): assert_equal(res6._free_params_index, [4, 5]) -def test_varmax_invalid(): +def test_varmax_validate(): # Test for invalid uses of parameter fixing endog = np.log(macrodata[['cpi', 'realgdp']]).diff().iloc[1:] exog = np.log(macrodata[['realinv']]).diff().iloc[1:] @@ -330,18 +413,30 @@ def test_varmax_invalid(): with pytest.raises(ValueError): with mod3.fix_params({'L1.cpi.cpi': 0.5}): pass + constraints = {'L1.cpi.cpi': 0.3, 'L2.cpi.cpi': 0.1} with mod3.fix_params(constraints): assert_(mod3._has_fixed_params) assert_equal(mod3._fixed_params, constraints) assert_equal(mod3._fixed_params_index, [1, 2]) assert_equal(mod3._free_params_index, [0, 3]) + res3 = mod3.fit_constrained(constraints, start_params=[0, 1.], disp=False) assert_(res3._has_fixed_params) assert_equal(res3._fixed_params, constraints) assert_equal(res3._fixed_params_index, [1, 2]) assert_equal(res3._free_params_index, [0, 3]) + with mod3.fix_params(constraints): + # overwrite L1.cpi.cpi with nested fix_params + with mod3.fix_params({'L1.cpi.cpi': -0.3}): + assert_(mod3._has_fixed_params) + assert_equal( + mod3._fixed_params, {'L1.cpi.cpi': -0.3, 'L2.cpi.cpi': 0.1} + ) + assert_equal(mod3._fixed_params_index, [1, 2]) + assert_equal(mod3._free_params_index, [0, 3]) + # With k_endog > 1, we can only fix the entire set of AR coefficients when # `enforce_stationarity=True`. mod4 = varmax.VARMAX(endog, order=(1, 0))
(Minor) BUG: statespace MLEModel throws a false validation error in a rare edge case of nested fix_params #### Describe the bug ##### Context: - The parameters we can fix depend on parameters like `enforce_stationarity` and `enforce_invertibility`. - As an example, when fixing parameters for `mod = SARIMAX(endog, order=(2, 0, 0), enforce_stationarity=False)`, we cannot fix individual AR parameters (reference: `._validate_can_fix_params`). In other words, we should fix either 1) both `ar.L1` and `ar.L2` or 2) neither. - The implementation of `SARIMAX` (and more generally `MLEModel`) generally supports nested `fix_params`. - However, the following rare edge case throws a false-negative validation error. ##### Edge case: The following edge case first fixes both `ar.L1` and `ar.L2` in the outer `fix_params` (which works as expected), and then overwrites the fixed value of `ar.L1` in an inner nested `fixed_params` (which should work but throws an error). The reason is that only the newly added fixed params are passed into `_validate_can_fix_params` while the already fixed params are ignored. ``` mod = SARIMAX(endog, order=(2, 0, 0), enforce_stationarity=False) with mod.fix_params({'ar.L1': 1, 'ar.L2': 5}): # overwrites outer fixed ar.L1 with a nested fix_param with mod.fix_params({'ar.L1': -1}): print(mod._fixed_params) # should be {'ar.L1': -1, 'ar.L2': 5} ``` Please see below for a full code snippet . ##### Other similarly affected modules: The same goes to a few other subclasses of `MLEModel` which implements a similar `_validate_can_fix_params` function, such as `DynamicFactor`, `UnobservedComponents`, and `VARMAX`. ##### Fix: This should only require a one-liner-ish fix that passes both already fixed params and newly fixed params into `_validate_can_fix_params` inside `MLEModel`. 
#### Code Sample, a copy-pastable example if possible ```python # imports from statsmodels import datasets from statsmodels.tsa.statespace import sarimax as sarimax_model # data macrodata = datasets.macrodata.load_pandas().data endog = macrodata['infl'].copy() # models mod_1 = sarimax_model.SARIMAX(endog, order=(2, 0, 0), enforce_stationarity=False) mod_2 = sarimax_model.SARIMAX(endog, order=(2, 0, 0), enforce_stationarity=True) # this works as expected with mod_1.fix_params({'ar.L1': 1, 'ar.L2': 5}): # overwrites with a nested fix_param s with mod_1.fix_params({'ar.L1': -1}): print(mod_1._fixed_params) # output: {'ar.L1': -1, 'ar.L2': 5} # BUG: this throws error although mod_2._fixed_params is valid with mod_2.fix_params({'ar.L1': 1, 'ar.L2': 5}): # overwrites with a nested fix_param with mod_2.fix_params({'ar.L1': -1}): print(mod_2._fixed_params) # should be {'ar.L1': -1, 'ar.L2': 5} but throws error ``` <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `main`. If your problem has been fixed in an unreleased version, you might be able to use `main` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the main branch of this repository? It helps the limited resources if we know problems exist in the current main branch so that they do not need to check whether the code sample produces a bug in the next release. If the issue has not been resolved, please file it in the issue tracker. 
</details> #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> ``` INSTALLED VERSIONS ------------------ Python: 3.9.2.final.0 OS: Linux 5.8.0-55-generic #62~20.04.1-Ubuntu SMP Wed Jun 2 08:55:04 UTC 2021 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: v0.13.0.dev0+380.g79ddd9882 Required Dependencies ===================== cython: 0.29.22 numpy: 1.20.1 scipy: 1.6.1 pandas: 1.2.3 dateutil: 2.8.1 patsy: 0.5.1 Optional Dependencies ===================== matplotlib: 3.3.4 Backend TkAgg is interactive backend. Turning interactive mode on. backend: TkAgg cvxopt: Not installed joblib: Not installed Developer Tools ================ IPython: 7.21.0 jinja2: 2.11.3 sphinx: 3.5.3 pygments: 2.8.1 pytest: 6.2.2 virtualenv: Not installed ``` </details>
"2021-06-17T05:08:16Z"
0.12
[ "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_results_append", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_mle_validate", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_structural", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_results_extend", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_dynamic_factor_diag_error_cov", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_results_apply", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_nested_fix_params", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_fix_params", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_sarimax_nonconsecutive", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_score_shape" ]
[ "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_varmax_validate", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_dynamic_factor_validate", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_sarimax_validate", "statsmodels/tsa/statespace/tests/test_fixed_params.py::test_structural_validate" ]
Python
[]
[]
statsmodels/statsmodels
7,511
statsmodels__statsmodels-7511
[ "7510" ]
f8420a0a085354846092dd046ab2c49a83c649b3
diff --git a/statsmodels/base/model.py b/statsmodels/base/model.py --- a/statsmodels/base/model.py +++ b/statsmodels/base/model.py @@ -2216,12 +2216,6 @@ def remove_data(self): cls_attrs[name] = attr data_attrs = [x for x in cls_attrs if isinstance(cls_attrs[x], cached_data)] - value_attrs = [x for x in cls_attrs - if isinstance(cls_attrs[x], cached_value)] - # make sure the cached for value_attrs are evaluated; this needs to - # occur _before_ any other attributes are removed. - for name in value_attrs: - getattr(self, name) for name in data_attrs: self._cache[name] = None diff --git a/statsmodels/regression/rolling.py b/statsmodels/regression/rolling.py --- a/statsmodels/regression/rolling.py +++ b/statsmodels/regression/rolling.py @@ -497,6 +497,7 @@ def __init__( self._cov_type = cov_type self._use_pandas = self.model.data.row_labels is not None self._data_attr = [] + self._cache = {} def _wrap(self, val): """Wrap output as pandas Series or DataFrames as needed"""
diff --git a/statsmodels/regression/tests/test_quantile_regression.py b/statsmodels/regression/tests/test_quantile_regression.py --- a/statsmodels/regression/tests/test_quantile_regression.py +++ b/statsmodels/regression/tests/test_quantile_regression.py @@ -281,3 +281,11 @@ def test_alpha_summary(): summ_20 = res.summary(alpha=.2) assert '[0.025 0.975]' not in str(summ_20) assert '[0.1 0.9]' in str(summ_20) + + +def test_remove_data(): + X = np.array([[1, 0], [0, 1], [0, 2.1], [0, 3.1]], dtype=np.float64) + y = np.array([0, 1, 2, 3], dtype=np.float64) + + res = QuantReg(y, X).fit(0.5) + res.remove_data()
BUG: remove_data faills if model has non-implemented attributes with cache #### Describe the bug If there is a non-implemented attribute in the model with cache, `remove_data` fails at `getattr` call in L2224 of model.py. https://github.com/statsmodels/statsmodels/blob/main/statsmodels/base/model.py#L2224 #### Code Sample, a copy-pastable example if possible ```python from statsmodels.regression.quantile_regression import QuantReg X = np.array([[1, 0], [0, 1], [0, 2.1], [0, 3.1]], dtype=np.float64) y = np.array([0, 1, 2, 3], dtype=np.float64) res = QuantReg(y, X).fit(0.5) res.remove_data() ``` <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `main`. If your problem has been fixed in an unreleased version, you might be able to use `main` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the main branch of this repository? It helps the limited resources if we know problems exist in the current main branch so that they do not need to check whether the code sample produces a bug in the next release. </details> If the issue has not been resolved, please file it in the issue tracker. 
#### Expected Output The code does not raise `NotImplementedError` #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.9.2.final.0 OS: Linux 5.4.0-74-generic #83-Ubuntu SMP Sat May 8 02:35:39 UTC 2021 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: v0.13.0.dev0+391.g3f6fa5599 (/home/mknz/dev/statsmodels/statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.20.3 (/home/mknz/dev/statsmodels/env/lib/python3.9/site-packages/numpy) scipy: 1.6.3 (/home/mknz/dev/statsmodels/env/lib/python3.9/site-packages/scipy) pandas: 1.2.4 (/home/mknz/dev/statsmodels/env/lib/python3.9/site-packages/pandas) dateutil: 2.8.1 (/home/mknz/dev/statsmodels/env/lib/python3.9/site-packages/dateutil) patsy: 0.5.1 (/home/mknz/dev/statsmodels/env/lib/python3.9/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.4.2 (/home/mknz/.pyenv/versions/3.9.2/lib/python3.9/site-packages/matplotlib) backend: TkAgg cvxopt: Not installed joblib: 1.0.1 (/home/mknz/.pyenv/versions/3.9.2/lib/python3.9/site-packages/joblib) Developer Tools ================ IPython: 7.23.1 (/home/mknz/.pyenv/versions/3.9.2/lib/python3.9/site-packages/IPython) jinja2: 3.0.0 (/home/mknz/.pyenv/versions/3.9.2/lib/python3.9/site-packages/jinja2) sphinx: Not installed pygments: 2.9.0 (/home/mknz/.pyenv/versions/3.9.2/lib/python3.9/site-packages/pygments) pytest: 6.2.4 (/home/mknz/dev/statsmodels/env/lib/python3.9/site-packages/pytest) virtualenv: 20.4.6 (/home/mknz/.pyenv/versions/3.9.2/lib/python3.9/site-packages/virtualenv) </details>
"2021-06-18T12:51:21Z"
0.12
[ "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianHsheather::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::test_fitted_residuals", "statsmodels/regression/tests/test_quantile_regression.py::test_alpha_summary", "statsmodels/regression/tests/test_quantile_regression.py::test_zero_resid", "statsmodels/regression/tests/test_quantile_regression.py::test_use_t_summary", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_sparsity", 
"statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightHsheather::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightBofinger::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_bse", 
"statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneChamberlain::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneBofinger::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_tvalues", 
"statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheatherQ75::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_tvalues", 
"statsmodels/regression/tests/test_quantile_regression.py::TestGaussianChamberlain::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestParzeneHsheather::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_df_resid", 
"statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestGaussianBofinger::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineHsheather::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_params", 
"statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovBofinger::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovHsheather::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_bandwidth", 
"statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestEpanechnikovChamberlain::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestBiweightChamberlain::test_nobs", 
"statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_df_resid", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_bandwidth", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineChamberlain::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_bse", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_df_model", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_nobs", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_prsquared", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_conf_int", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_sparsity", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_tvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_params", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_df_resid", 
"statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_pvalues", "statsmodels/regression/tests/test_quantile_regression.py::TestCosineBofinger::test_bandwidth" ]
[ "statsmodels/regression/tests/test_quantile_regression.py::test_remove_data" ]
Python
[]
[]
statsmodels/statsmodels
7,538
statsmodels__statsmodels-7538
[ "7527" ]
5a909860e8b79769cacb3a78254c22eadc8bc324
diff --git a/statsmodels/tsa/statespace/sarimax.py b/statsmodels/tsa/statespace/sarimax.py --- a/statsmodels/tsa/statespace/sarimax.py +++ b/statsmodels/tsa/statespace/sarimax.py @@ -1879,6 +1879,10 @@ def __init__(self, model, params, filter_results, cov_type=None, end += k setattr(self, '_params_%s' % name, self.params[start:end]) start += k + # GH7527, all terms must be defined + all_terms = ['ar', 'ma', 'seasonal_ar', 'seasonal_ma', 'variance'] + for name in set(all_terms).difference(self.param_terms): + setattr(self, '_params_%s' % name, np.empty(0)) # Handle removing data self._data_attr_model.extend(['orig_endog', 'orig_exog'])
diff --git a/statsmodels/tsa/statespace/tests/test_save.py b/statsmodels/tsa/statespace/tests/test_save.py --- a/statsmodels/tsa/statespace/tests/test_save.py +++ b/statsmodels/tsa/statespace/tests/test_save.py @@ -39,6 +39,19 @@ def test_sarimax(temp_filename): assert_allclose(res.llf, res2.llf) +# GH7527 [email protected]("order", ((4, 1, 0), (0, 1, 4), (0, 2, 0))) +def test_sarimax_save_remove_data(temp_filename, order): + mod = sarimax.SARIMAX(macrodata["realgdp"].values, order=order) + res = mod.smooth(mod.start_params) + res.summary() + res.save(temp_filename, remove_data=True) + res2 = sarimax.SARIMAXResults.load(temp_filename) + assert_allclose(res.params, res2.params) + assert_allclose(res.bse, res2.bse) + assert_allclose(res.llf, res2.llf) + + def test_sarimax_pickle(): mod = sarimax.SARIMAX(macrodata['realgdp'].values, order=(4, 1, 0)) pkl_mod = pickle.loads(pickle.dumps(mod))
BUG: SARIMAXResults save_model() broken when MA order=0 #### Describe the bug The SARIMAXResults() save_model() method breaks when the MA order is 0. It gives: AttributeError: 'SARIMAXResults' object has no attribute '_params_ma' #### Code Sample, a copy-pastable example if possible from statsmodels.tsa.statespace.sarimax import SARIMAX import pandas as pd ts = pd.Series(range(100)) model = SARIMAX(ts,order=(1,0,0)) model_fit = model.fit() model_fit.save('testing.model', remove_data=True) #### Expected Output Should save without error. #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.7.10.final.0 OS: Linux 3.10.0-514.el7.x86_64 #1 SMP Tue Nov 22 16:42:41 UTC 2016 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.12.2 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.20.1 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/numpy) scipy: 1.6.1 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/scipy) pandas: 1.2.3 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/pandas) dateutil: 2.8.1 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/dateutil) patsy: 0.5.1 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.3.4 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/matplotlib) backend: module://ipykernel.pylab.backend_inline cvxopt: Not installed joblib: 1.0.1 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/joblib) Developer Tools ================ IPython: 7.21.0 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/IPython) jinja2: Not installed sphinx: Not installed pygments: 2.8.1 (/shared/apps/anaconda37/envs/fo1/lib/python3.7/site-packages/pygments) pytest: Not installed virtualenv: Not installed 
</details>
"2021-07-06T09:10:04Z"
0.12
[ "statsmodels/tsa/statespace/tests/test_save.py::test_structural", "statsmodels/tsa/statespace/tests/test_save.py::test_dynamic_factor_pickle", "statsmodels/tsa/statespace/tests/test_save.py::test_structural_pickle", "statsmodels/tsa/statespace/tests/test_save.py::test_sarimax_pickle", "statsmodels/tsa/statespace/tests/test_save.py::test_varmax", "statsmodels/tsa/statespace/tests/test_save.py::test_sarimax", "statsmodels/tsa/statespace/tests/test_save.py::test_existing_pickle", "statsmodels/tsa/statespace/tests/test_save.py::test_dynamic_factor", "statsmodels/tsa/statespace/tests/test_save.py::test_varmax_pickle" ]
[ "statsmodels/tsa/statespace/tests/test_save.py::test_sarimax_save_remove_data[order0]", "statsmodels/tsa/statespace/tests/test_save.py::test_sarimax_save_remove_data[order2]", "statsmodels/tsa/statespace/tests/test_save.py::test_sarimax_save_remove_data[order1]" ]
Python
[]
[]
statsmodels/statsmodels
7,574
statsmodels__statsmodels-7574
[ "7565" ]
195be20082b3e4b1884e7370a401aaf7fab3ced9
diff --git a/statsmodels/tsa/holtwinters/results.py b/statsmodels/tsa/holtwinters/results.py --- a/statsmodels/tsa/holtwinters/results.py +++ b/statsmodels/tsa/holtwinters/results.py @@ -283,14 +283,16 @@ def forecast(self, steps=1): """ try: freq = getattr(self.model._index, "freq", 1) - if isinstance(freq, int): - start = self.model._index.shape[0] - end = start + steps - 1 - else: + if not isinstance(freq, int) and isinstance( + self.model._index, (pd.DatetimeIndex, pd.PeriodIndex) + ): start = self.model._index[-1] + freq end = self.model._index[-1] + steps * freq + else: + start = self.model._index.shape[0] + end = start + steps - 1 return self.model.predict(self.params, start=start, end=end) - except (AttributeError, ValueError): + except AttributeError: # May occur when the index does not have a freq return self.model._predict(h=steps, **self.params).fcastvalues
diff --git a/statsmodels/tsa/holtwinters/tests/test_holtwinters.py b/statsmodels/tsa/holtwinters/tests/test_holtwinters.py --- a/statsmodels/tsa/holtwinters/tests/test_holtwinters.py +++ b/statsmodels/tsa/holtwinters/tests/test_holtwinters.py @@ -808,7 +808,9 @@ def test_invalid_seasonal(seasonal): def test_2d_data(): with pytest.raises(ValueError): - ExponentialSmoothing(pd.concat([housing_data, housing_data], axis=1)).fit() + ExponentialSmoothing( + pd.concat([housing_data, housing_data], axis=1) + ).fit() def test_infer_freq(): @@ -2034,3 +2036,41 @@ def test_estimated_initialization_short_data(ses, trend, seasonal, nobs): initialization_method="estimated", ).fit() assert res.mle_retvals.success + + +def test_invalid_index(reset_randomstate): + y = np.random.standard_normal(12 * 200) + df_y = pd.DataFrame(data=y) + # Can't have a freq here + df_y.index.freq = "d" + + model = ExponentialSmoothing( + df_y, + seasonal_periods=12, + trend="add", + seasonal="add", + initialization_method="heuristic", + ) + fitted = model.fit(optimized=True, use_brute=True) + + fcast = fitted.forecast(steps=157200) + assert fcast.shape[0] == 157200 + + index = pd.date_range("2020-01-01", periods=2 * y.shape[0]) + index = np.random.choice(index, size=df_y.shape[0], replace=False) + index = sorted(index) + df_y.index = index + assert isinstance(df_y.index, pd.DatetimeIndex) + assert df_y.index.freq is None + assert df_y.index.inferred_freq is None + with pytest.warns(ValueWarning, match="A date index has been provided"): + model = ExponentialSmoothing( + df_y, + seasonal_periods=12, + trend="add", + seasonal="add", + initialization_method="heuristic", + ) + fitted = model.fit(optimized=True, use_brute=True) + with pytest.warns(ValueWarning, match="No supported"): + fitted.forecast(steps=157200)
BUG: Check `freq` only when useful in forecasting. #### Describe the bug I'm getting above TypeError while executing line 23 (forcast()) even though my frequency is not 'str'. #### Code Sample, a copy-pastable example if possible ```python from statsmodels.tsa.holtwinters import ExponentialSmoothing as HWES #read the data file. the date column is expected to be in the mm-dd-yyyy format. df_train_y = pd.DataFrame(data = tsne_train_output) df_train_y.index.freq = 'd' df_test_y = pd.DataFrame(data = tsne_test_output) df_test_y.index.freq = 'd' #plot the data df_train_y.plot() plt.show() #build and train the model on the training data model = HWES(df_train_y, seasonal_periods=144, trend='add', seasonal='add') fitted = model.fit(optimized=True, use_brute=True) #print out the training summary print(fitted.summary()) #create an out of sample forcast for the next 12 steps beyond the final data point in the training data set trend_forecast = fitted.forecast(steps= 157200) #plot the training data, the test data and the forecast on the same plot fig = plt.figure() fig.suptitle('Actual #picups Vs Predicted #pickups') past, = plt.plot(df_train_y.index, df_train_y, 'b.-', label='Actual #Pickups') future, = plt.plot(df_test_y.index, df_test_y, 'r.-', label='Predicted #pickup') predicted_future, = plt.plot(df_test_y.index, trend_forecast, 'g.-', label='#pickups forcasted') plt.legend(handles=[past, future, predicted_future]) plt.show() ``` ### Error Message ``` TypeError Traceback (most recent call last) <ipython-input-153-3c04122733a1> in <module>() 21 22 #create an out of sample forcast for the next 12 steps beyond the final data point in the training data set ---> 23 trend_forecast = fitted.forecast(steps= 157200) 24 25 #plot the training data, the test data and the forecast on the same plot 1 frames /usr/local/lib/python3.7/dist-packages/statsmodels/tsa/holtwinters.py in forecast(self, steps) 344 try: 345 freq = getattr(self.model._index, 'freq', 1) --> 346 start = 
self.model._index[-1] + freq 347 end = self.model._index[-1] + steps * freq 348 return self.model.predict(self.params, start=start, end=end) TypeError: unsupported operand type(s) for +: 'int' and 'str' ``` <details> #### Expected Output I would like to get forcast values in trend_forcast #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> [paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line] INSTALLED VERSIONS ------------------ Python: 3.7.10.final.0 OS: Linux 5.4.104+ #1 SMP Sat Jun 5 09:50:34 PDT 2021 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 Statsmodels =========== Installed: 0.10.2 (/usr/local/lib/python3.7/dist-packages/statsmodels) Required Dependencies ===================== cython: 0.29.23 (/usr/local/lib/python3.7/dist-packages/Cython) numpy: 1.19.5 (/usr/local/lib/python3.7/dist-packages/numpy) scipy: 1.4.1 (/usr/local/lib/python3.7/dist-packages/scipy) pandas: 1.1.5 (/usr/local/lib/python3.7/dist-packages/pandas) dateutil: 2.8.1 (/usr/local/lib/python3.7/dist-packages/dateutil) patsy: 0.5.1 (/usr/local/lib/python3.7/dist-packages/patsy) Optional Dependencies ===================== matplotlib: 3.2.2 (/usr/local/lib/python3.7/dist-packages/matplotlib) backend: module://ipykernel.pylab.backend_inline cvxopt: 1.2.6 (/usr/local/lib/python3.7/dist-packages/cvxopt) joblib: 1.0.1 (/usr/local/lib/python3.7/dist-packages/joblib) Developer Tools ================ IPython: 5.5.0 (/usr/local/lib/python3.7/dist-packages/IPython) jinja2: 2.11.3 (/usr/local/lib/python3.7/dist-packages/jinja2) sphinx: 1.8.5 (/usr/local/lib/python3.7/dist-packages/sphinx) pygments: 2.6.1 (/usr/local/lib/python3.7/dist-packages/pygments) pytest: 3.6.4 (/usr/local/lib/python3.7/dist-packages) virtualenv: Not installed </details>
Running the code below on master produces no errors. ```python from statsmodels.tsa.holtwinters import ExponentialSmoothing as HWES import numpy as np y = np.random.standard_normal(144*1000) #build and train the model on the training data model = HWES(y, seasonal_periods=144, trend='add', seasonal='add') fitted = model.fit(optimized=True, use_brute=True) #print out the training summary print(fitted.summary()) #create an out of sample forcast for the next 12 steps beyond the final data point in the training data set trend_forecast = fitted.forecast(steps= 157200) ``` If you run this and see an error, please upgrade to master. If you upgrade to master and see an error in your code, it is likely something with the dataset you are using. You might reset the index to be an integer index. We really only support integer, DateTime or Period indices. We might not check this enough. @bashtage Thank you for the reply! I ran the code provided by you to check if that is working or not, that worked without any errors. As suggested I also tried to reset index for the datafram and reran the code, it is showing the same error. Also, when i'm trying to run `type(fitted.model._index[-1])` on the model fitted on my dataset, the output is `int`. This is how my dataframe looks like, for your reference: ![image](https://user-images.githubusercontent.com/52621752/125169237-e2379600-e1c6-11eb-9288-f2a3b7c02fa9.png) Kindly help! Do you see the bug on master? Which version of statsmodels are you using? The solution is to not set `.freq`. Leave this as `None`. > The solution is to not set `.freq`. Leave this as `None`. This helped to get rid of keyError, Thanks a ton!
"2021-07-14T11:44:39Z"
0.12
[ "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_start_param_length", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_seasonal[add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_summary_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[None-1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_initialization", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params1]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_valid_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_seasonal[mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[L-BFGS-B]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_direct_holt_add", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[irregular]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_bad_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fix_set_parameters", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[period]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_minimizer_kwargs_error", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-None]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[100]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_integer_array", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[bootstrap-1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[trust-constr]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[basinhopping]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[2000]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[least_squares]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[Powell]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-None]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fixed_errors", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_infer_freq", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_debiased", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-add-add]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_boxcox_components", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_infeasible_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[TNC]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_attributes", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[SLSQP]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-add]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[1000]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_basin_hopping", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[bootstrap-10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[date_range]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fixed_basic", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_damping_trend_zero", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-None]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[None-10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_keywords", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[range]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fix_unfixable", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_dampen", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_no_params_to_optimize", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_2d_data", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-add]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal_buggy", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_simple_exp_smoothing", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt_damp_r", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal_add_mul", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_predict", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt_damp_fit", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_ndarray" ]
[ "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_index" ]
Python
[]
[]
statsmodels/statsmodels
7,581
statsmodels__statsmodels-7581
[ "7199" ]
1745692ba1a092e0646493ab8956ee66e0c43397
diff --git a/statsmodels/base/model.py b/statsmodels/base/model.py --- a/statsmodels/base/model.py +++ b/statsmodels/base/model.py @@ -1075,7 +1075,6 @@ def predict(self, exog=None, transform=True, *args, **kwargs): else: exog_index = [exog.index.name] - if transform and hasattr(self.model, 'formula') and (exog is not None): # allow both location of design_info, see #7043 design_info = (getattr(self.model, "design_info", None) or @@ -2144,7 +2143,7 @@ def load(cls, fname): Parameters ---------- - fname : {str, handle} + fname : {str, handle, pathlib.Path} A string filename or a file handle. Returns diff --git a/statsmodels/iolib/openfile.py b/statsmodels/iolib/openfile.py --- a/statsmodels/iolib/openfile.py +++ b/statsmodels/iolib/openfile.py @@ -1,6 +1,8 @@ """ Handle file opening for read/write """ +from pathlib import Path + from numpy.lib._iotools import _is_string_like @@ -9,15 +11,16 @@ class EmptyContextManager(object): This class is needed to allow file-like object to be used as context manager, but without getting closed. """ + def __init__(self, obj): self._obj = obj def __enter__(self): - '''When entering, return the embedded object''' + """When entering, return the embedded object""" return self._obj def __exit__(self, *args): - '''Do not hide anything''' + """Do not hide anything""" return False def __getattr__(self, name): @@ -25,14 +28,15 @@ def __getattr__(self, name): def _open(fname, mode, encoding): - if fname.endswith('.gz'): + if fname.endswith(".gz"): import gzip + return gzip.open(fname, mode, encoding=encoding) else: return open(fname, mode, encoding=encoding) -def get_file_obj(fname, mode='r', encoding=None): +def get_file_obj(fname, mode="r", encoding=None): """ Light wrapper to handle strings, path objects and let files (anything else) pass through. @@ -54,10 +58,15 @@ def get_file_obj(fname, mode='r', encoding=None): already a file-like object, the returned context manager *will not close the file*. 
""" + if _is_string_like(fname): - return _open(fname, mode, encoding) + fname = Path(fname) + if isinstance(fname, Path): + return fname.open(mode=mode, encoding=encoding) + elif hasattr(fname, "open"): + return fname.open(mode=mode, encoding=encoding) try: - return open(fname, mode, encoding=encoding) # handle pathlib-like objs + return open(fname, mode, encoding=encoding) except TypeError: try: # Make sure the object has the write methods diff --git a/statsmodels/iolib/smpickle.py b/statsmodels/iolib/smpickle.py --- a/statsmodels/iolib/smpickle.py +++ b/statsmodels/iolib/smpickle.py @@ -8,12 +8,12 @@ def save_pickle(obj, fname): Parameters ---------- - fname : str + fname : {str, pathlib.Path} Filename to pickle to """ import pickle - with get_file_obj(fname, 'wb') as fout: + with get_file_obj(fname, "wb") as fout: pickle.dump(obj, fout, protocol=-1) @@ -29,7 +29,7 @@ def load_pickle(fname): Parameters ---------- - fname : str + fname : {str, pathlib.Path} Filename to unpickle Notes @@ -38,5 +38,5 @@ def load_pickle(fname): """ import pickle - with get_file_obj(fname, 'rb') as fin: + with get_file_obj(fname, "rb") as fin: return pickle.load(fin)
diff --git a/statsmodels/iolib/tests/test_pickle.py b/statsmodels/iolib/tests/test_pickle.py --- a/statsmodels/iolib/tests/test_pickle.py +++ b/statsmodels/iolib/tests/test_pickle.py @@ -1,11 +1,13 @@ +from statsmodels.compat.python import lrange + from io import BytesIO +import os import pathlib import tempfile from numpy.testing import assert_equal -from statsmodels.compat.python import lrange -from statsmodels.iolib.smpickle import save_pickle, load_pickle +from statsmodels.iolib.smpickle import load_pickle, save_pickle def test_pickle(): @@ -26,8 +28,6 @@ def test_pickle(): # cleanup, tested on Windows try: - import os - os.remove(path_str) os.remove(path_pathlib) os.rmdir(tmpdir) @@ -42,3 +42,34 @@ def test_pickle(): d = load_pickle(fh) fh.close() assert_equal(a, d) + + +def test_pickle_supports_open(): + tmpdir = tempfile.mkdtemp(prefix="pickle") + a = lrange(10) + + class SubPath: + def __init__(self, path): + self._path = pathlib.Path(path) + + def open( + self, + mode="r", + buffering=-1, + encoding=None, + errors=None, + newline=None, + ): + return self._path.open( + mode=mode, + buffering=buffering, + encoding=encoding, + errors=errors, + newline=newline, + ) + + # test with pathlib + path_pathlib = SubPath(tmpdir + os.pathsep + "res2.pkl") + save_pickle(a, path_pathlib) + c = load_pickle(path_pathlib) + assert_equal(a, c)
ENH: Allow both pathlib and string paths for file io operations such as load and save Using an object which inherits pathlib.Path and overrides the open method breaks, as it tries to open it with the builtin open method and the given path is from the s3 (this behaviour is implemented in the custom object). ```python import statsmodels.api as sm LOGIT_PATH = URIPath( "some_path" ) sm.load(LOGIT_PATH) class URIPath(pathlib.Path): def open(...): ... handle_s3_case ``` Suggestion: use the .open() method of pathlib.Path
"2021-07-15T16:18:22Z"
0.12
[ "statsmodels/iolib/tests/test_pickle.py::test_pickle" ]
[ "statsmodels/iolib/tests/test_pickle.py::test_pickle_supports_open" ]
Python
[]
[]
statsmodels/statsmodels
7,582
statsmodels__statsmodels-7582
[ "7343" ]
592133c8c494ba844a2cea96ef8ec7e165136cab
diff --git a/statsmodels/graphics/tsaplots.py b/statsmodels/graphics/tsaplots.py --- a/statsmodels/graphics/tsaplots.py +++ b/statsmodels/graphics/tsaplots.py @@ -2,6 +2,7 @@ from statsmodels.compat.pandas import deprecate_kwarg import calendar +import warnings import numpy as np import pandas as pd @@ -245,7 +246,7 @@ def plot_pacf( ax=None, lags=None, alpha=0.05, - method="ywadjusted", + method=None, use_vlines=True, title="Partial Autocorrelation", zero=True, @@ -271,15 +272,21 @@ def plot_pacf( returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to 1/sqrt(len(x)) - method : {'ywunbiased', 'ywmle', 'ols'} + method : str Specifies which method for the calculations to use: - - yw or ywunbiased : yule walker with bias correction in denominator - for acovf. Default. - - ywm or ywmle : yule walker without bias correction - - ols - regression of time series on lags of it and on constant - - ld or ldunbiased : Levinson-Durbin recursion with bias correction - - ldb or ldbiased : Levinson-Durbin recursion without bias correction + - "ywm" or "ywmle" : Yule-Walker without adjustment. Default. + - "yw" or "ywadjusted" : Yule-Walker with sample-size adjustment in + denominator for acovf. Default. + - "ols" : regression of time series on lags of it and on constant. + - "ols-inefficient" : regression of time series on lags using a single + common sample to estimate all pacf coefficients. + - "ols-adjusted" : regression of time series on lags with a bias + adjustment. + - "ld" or "ldadjusted" : Levinson-Durbin recursion with bias + correction. + - "ldb" or "ldbiased" : Levinson-Durbin recursion without bias + correction. use_vlines : bool, optional If True, vertical lines and markers are plotted. @@ -336,6 +343,15 @@ def plot_pacf( .. 
plot:: plots/graphics_tsa_plot_pacf.py """ + if method is None: + method = "yw" + warnings.warn( + "The default method 'yw' can produce PACF values outside of " + "the [-1,1] interval. After 0.13, the default will change to" + "unadjusted Yule-Walker ('ywm'). You can use this method now " + "by setting method='ywm'.", + FutureWarning, + ) fig, ax = utils.create_mpl_ax(ax) vlines_kwargs = {} if vlines_kwargs is None else vlines_kwargs lags, nlags, irregular = _prepare_data_corr_plot(x, lags, zero)
diff --git a/statsmodels/graphics/tests/test_tsaplots.py b/statsmodels/graphics/tests/test_tsaplots.py --- a/statsmodels/graphics/tests/test_tsaplots.py +++ b/statsmodels/graphics/tests/test_tsaplots.py @@ -71,8 +71,10 @@ def test_plot_pacf(close_figures): armaprocess = tsp.ArmaProcess(ar, ma) rs = np.random.RandomState(1234) pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal) - plot_pacf(pacf, ax=ax) - plot_pacf(pacf, ax=ax, alpha=None) + with pytest.warns(FutureWarning): + plot_pacf(pacf, ax=ax) + with pytest.warns(FutureWarning, match="The default"): + plot_pacf(pacf, ax=ax, alpha=None) @pytest.mark.matplotlib @@ -88,20 +90,23 @@ def test_plot_pacf_kwargs(close_figures): pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal) buff = BytesIO() - plot_pacf(pacf, ax=ax) + with pytest.warns(FutureWarning, match="The default"): + plot_pacf(pacf, ax=ax) fig.savefig(buff, format="rgba") buff_linestyle = BytesIO() fig_linestyle = plt.figure() ax = fig_linestyle.add_subplot(111) - plot_pacf(pacf, ax=ax, ls="-") + with pytest.warns(FutureWarning, match="The default"): + plot_pacf(pacf, ax=ax, ls="-") fig_linestyle.savefig(buff_linestyle, format="rgba") buff_with_vlines = BytesIO() fig_with_vlines = plt.figure() ax = fig_with_vlines.add_subplot(111) vlines_kwargs = {"linestyles": "dashdot"} - plot_pacf(pacf, ax=ax, vlines_kwargs=vlines_kwargs) + with pytest.warns(FutureWarning, match="The default"): + plot_pacf(pacf, ax=ax, vlines_kwargs=vlines_kwargs) fig_with_vlines.savefig(buff_with_vlines, format="rgba") buff.seek(0) @@ -185,9 +190,12 @@ def test_plot_pacf_irregular(close_figures): armaprocess = tsp.ArmaProcess(ar, ma) rs = np.random.RandomState(1234) pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal) - plot_pacf(pacf, ax=ax, lags=np.arange(1, 11)) - plot_pacf(pacf, ax=ax, lags=10, zero=False) - plot_pacf(pacf, ax=ax, alpha=None, zero=False) + with pytest.warns(FutureWarning, match="The default"): + plot_pacf(pacf, ax=ax, 
lags=np.arange(1, 11)) + with pytest.warns(FutureWarning, match="The default"): + plot_pacf(pacf, ax=ax, lags=10, zero=False) + with pytest.warns(FutureWarning, match="The default"): + plot_pacf(pacf, ax=ax, alpha=None, zero=False) @pytest.mark.matplotlib
BUG: Change default pacf method #### Describe the bug I found the calculated partial autocorrelation would be larger than 1 using sm.graphics.tsa.plot_pacf. #### Code Sample ```python import statsmodels.api as sm import numpy as np import matplotlib.pyplot as plt x=np.linspace(0,1,num=512) f=20 y=np.cos(2*np.pi*f*x) plt.plot(x,y_trend) sm.graphics.tsa.plot_pacf(y_trend, lags=25) ``` <details>
Change the method to "ywmle" or "old". The default option here is a poor choice and can produce insane plats. It should be changed. Thanks for your reply! It works reasonably after changing the method to 'ols' or 'ywmle'. Hello @bashtage , is there a plan to change the default pacf method in the future? Probably by the next release. Thanks for the quick reply.
"2021-07-16T07:02:35Z"
0.12
[ "statsmodels/graphics/tests/test_tsaplots.py::test_predict_plot[0.1-False-model_and_args1]", "statsmodels/graphics/tests/test_tsaplots.py::test_plot_quarter", "statsmodels/graphics/tests/test_tsaplots.py::test_predict_plot[0.1-True-model_and_args0]", "statsmodels/graphics/tests/test_tsaplots.py::test_predict_plot[0.1-False-model_and_args0]", "statsmodels/graphics/tests/test_tsaplots.py::test_plot_acf", "statsmodels/graphics/tests/test_tsaplots.py::test_predict_plot[None-True-model_and_args0]", "statsmodels/graphics/tests/test_tsaplots.py::test_predict_plot[None-False-model_and_args0]", "statsmodels/graphics/tests/test_tsaplots.py::test_predict_plot[None-True-model_and_args1]", "statsmodels/graphics/tests/test_tsaplots.py::test_predict_plot[0.1-True-model_and_args1]", "statsmodels/graphics/tests/test_tsaplots.py::test_plot_acf_irregular", "statsmodels/graphics/tests/test_tsaplots.py::test_seasonal_plot", "statsmodels/graphics/tests/test_tsaplots.py::test_predict_plot[None-False-model_and_args1]", "statsmodels/graphics/tests/test_tsaplots.py::test_plot_acf_missing", "statsmodels/graphics/tests/test_tsaplots.py::test_plot_acf_kwargs" ]
[ "statsmodels/graphics/tests/test_tsaplots.py::test_plot_pacf_kwargs", "statsmodels/graphics/tests/test_tsaplots.py::test_plot_pacf", "statsmodels/graphics/tests/test_tsaplots.py::test_plot_pacf_irregular" ]
Python
[]
[]
statsmodels/statsmodels
7,612
statsmodels__statsmodels-7612
[ "7610" ]
a316f85004332bd29a7d62358337bce9901a0c51
diff --git a/statsmodels/graphics/agreement.py b/statsmodels/graphics/agreement.py --- a/statsmodels/graphics/agreement.py +++ b/statsmodels/graphics/agreement.py @@ -6,6 +6,7 @@ ''' import numpy as np + from . import utils diff --git a/statsmodels/graphics/api.py b/statsmodels/graphics/api.py --- a/statsmodels/graphics/api.py +++ b/statsmodels/graphics/api.py @@ -1,13 +1,43 @@ -# flake8: noqa -from .functional import hdrboxplot, fboxplot, rainbowplot +from . import tsaplots as tsa +from .agreement import mean_diff_plot +from .boxplots import beanplot, violinplot from .correlation import plot_corr, plot_corr_grid -from .gofplots import qqplot -from .boxplots import violinplot, beanplot -from .regressionplots import (abline_plot, plot_regress_exog, plot_fit, - plot_partregress, plot_partregress_grid, - plot_ccpr, plot_ccpr_grid, influence_plot, - plot_leverage_resid2) from .factorplots import interaction_plot +from .functional import fboxplot, hdrboxplot, rainbowplot +from .gofplots import qqplot from .plottools import rainbow -from . import tsaplots as tsa -from .agreement import mean_diff_plot +from .regressionplots import ( + abline_plot, + influence_plot, + plot_ccpr, + plot_ccpr_grid, + plot_fit, + plot_leverage_resid2, + plot_partregress, + plot_partregress_grid, + plot_regress_exog, +) + +__all__ = [ + "abline_plot", + "beanplot", + "fboxplot", + "hdrboxplot", + "influence_plot", + "interaction_plot", + "mean_diff_plot", + "plot_ccpr", + "plot_ccpr_grid", + "plot_corr", + "plot_corr_grid", + "plot_fit", + "plot_leverage_resid2", + "plot_partregress", + "plot_partregress_grid", + "plot_regress_exog", + "qqplot", + "rainbow", + "rainbowplot", + "tsa", + "violinplot", +] diff --git a/statsmodels/graphics/boxplots.py b/statsmodels/graphics/boxplots.py --- a/statsmodels/graphics/boxplots.py +++ b/statsmodels/graphics/boxplots.py @@ -8,7 +8,6 @@ from . 
import utils - __all__ = ['violinplot', 'beanplot'] diff --git a/statsmodels/graphics/dotplots.py b/statsmodels/graphics/dotplots.py --- a/statsmodels/graphics/dotplots.py +++ b/statsmodels/graphics/dotplots.py @@ -1,4 +1,5 @@ import numpy as np + from . import utils diff --git a/statsmodels/graphics/factorplots.py b/statsmodels/graphics/factorplots.py --- a/statsmodels/graphics/factorplots.py +++ b/statsmodels/graphics/factorplots.py @@ -3,6 +3,7 @@ Authors: Josef Perktold, Skipper Seabold, Denis A. Engemann """ from statsmodels.compat.python import lrange + import numpy as np from statsmodels.graphics.plottools import rainbow diff --git a/statsmodels/graphics/gofplots.py b/statsmodels/graphics/gofplots.py --- a/statsmodels/graphics/gofplots.py +++ b/statsmodels/graphics/gofplots.py @@ -706,10 +706,12 @@ def qqplot_2samples( Parameters ---------- data1 : {array_like, ProbPlot} - Data to plot along x axis. + Data to plot along x axis. If the sample sizes are unequal, the longer + series is always plotted along the x-axis. data2 : {array_like, ProbPlot} Data to plot along y axis. Does not need to have the same number of - observations as data 1. + observations as data 1. If the sample sizes are unequal, the longer + series is always plotted along the x-axis. xlabel : {None, str} User-provided labels for the x-axis. If None (default), other values are used. 
@@ -772,7 +774,7 @@ def qqplot_2samples( if not isinstance(data2, ProbPlot): data2 = ProbPlot(data2) - if data2.data.shape[0] >= data1.data.shape[0]: + if data2.data.shape[0] > data1.data.shape[0]: fig = data1.qqplot( xlabel=ylabel, ylabel=xlabel, line=line, other=data2, ax=ax ) diff --git a/statsmodels/graphics/mosaicplot.py b/statsmodels/graphics/mosaicplot.py --- a/statsmodels/graphics/mosaicplot.py +++ b/statsmodels/graphics/mosaicplot.py @@ -8,13 +8,15 @@ # Author: Enrico Giampieri - 21 Jan 2013 from statsmodels.compat.python import lrange, lzip -import numpy as np + from itertools import product -from numpy import iterable, r_, cumsum, array -from statsmodels.graphics import utils +import numpy as np +from numpy import array, cumsum, iterable, r_ from pandas import DataFrame +from statsmodels.graphics import utils + __all__ = ["mosaic"] @@ -620,6 +622,7 @@ def mosaic(data, index=None, ax=None, horizontal=True, gap=0.005, " See examples.") from matplotlib.patches import Rectangle + #from pylab import Rectangle fig, ax = utils.create_mpl_ax(ax) # normalize the data to a dict with tuple of strings as keys diff --git a/statsmodels/graphics/plot_grids.py b/statsmodels/graphics/plot_grids.py --- a/statsmodels/graphics/plot_grids.py +++ b/statsmodels/graphics/plot_grids.py @@ -16,7 +16,6 @@ from . import utils - __all__ = ['scatter_ellipse'] diff --git a/statsmodels/graphics/plottools.py b/statsmodels/graphics/plottools.py --- a/statsmodels/graphics/plottools.py +++ b/statsmodels/graphics/plottools.py @@ -1,5 +1,6 @@ import numpy as np + def rainbow(n): """ Returns a list of colors sampled at equal intervals over the spectrum. 
diff --git a/statsmodels/graphics/regressionplots.py b/statsmodels/graphics/regressionplots.py --- a/statsmodels/graphics/regressionplots.py +++ b/statsmodels/graphics/regressionplots.py @@ -10,27 +10,28 @@ 2011-10-27 : docstrings ''' -from statsmodels.compat.python import lrange, lzip from statsmodels.compat.pandas import Appender +from statsmodels.compat.python import lrange, lzip import numpy as np import pandas as pd from patsy import dmatrix -from statsmodels.regression.linear_model import OLS, GLS, WLS -from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.generalized_estimating_equations import GEE -from statsmodels.sandbox.regression.predstd import wls_prediction_std +from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.graphics import utils from statsmodels.nonparametric.smoothers_lowess import lowess +from statsmodels.regression.linear_model import GLS, OLS, WLS +from statsmodels.sandbox.regression.predstd import wls_prediction_std from statsmodels.tools.tools import maybe_unwrap_results from ._regressionplots_doc import ( _plot_added_variable_doc, - _plot_partial_residuals_doc, _plot_ceres_residuals_doc, _plot_influence_doc, - _plot_leverage_resid2_doc) + _plot_leverage_resid2_doc, + _plot_partial_residuals_doc, +) __all__ = ['plot_fit', 'plot_regress_exog', 'plot_partregress', 'plot_ccpr', 'plot_regress_exog', 'plot_partregress_grid', 'plot_ccpr_grid', @@ -929,7 +930,7 @@ def influence_plot(results, external=True, alpha=.05, criterion="cooks", def _plot_leverage_resid2(results, influence, alpha=.05, ax=None, **kwargs): - from scipy.stats import zscore, norm + from scipy.stats import norm, zscore fig, ax = utils.create_mpl_ax(ax) infl = influence diff --git a/statsmodels/graphics/tukeyplot.py b/statsmodels/graphics/tukeyplot.py --- a/statsmodels/graphics/tukeyplot.py +++ b/statsmodels/graphics/tukeyplot.py @@ -1,6 +1,6 @@ -import numpy as np -import matplotlib.pyplot as plt import matplotlib.lines as 
lines +import matplotlib.pyplot as plt +import numpy as np def tukeyplot(results, dim=None, yticklabels=None):
diff --git a/statsmodels/graphics/tests/test_agreement.py b/statsmodels/graphics/tests/test_agreement.py --- a/statsmodels/graphics/tests/test_agreement.py +++ b/statsmodels/graphics/tests/test_agreement.py @@ -4,7 +4,6 @@ from statsmodels.graphics.agreement import mean_diff_plot - try: import matplotlib.pyplot as plt except ImportError: diff --git a/statsmodels/graphics/tests/test_boxplots.py b/statsmodels/graphics/tests/test_boxplots.py --- a/statsmodels/graphics/tests/test_boxplots.py +++ b/statsmodels/graphics/tests/test_boxplots.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from statsmodels.graphics.boxplots import violinplot, beanplot from statsmodels.datasets import anes96 +from statsmodels.graphics.boxplots import beanplot, violinplot try: import matplotlib.pyplot as plt diff --git a/statsmodels/graphics/tests/test_correlation.py b/statsmodels/graphics/tests/test_correlation.py --- a/statsmodels/graphics/tests/test_correlation.py +++ b/statsmodels/graphics/tests/test_correlation.py @@ -1,8 +1,8 @@ import numpy as np import pytest -from statsmodels.graphics.correlation import plot_corr, plot_corr_grid from statsmodels.datasets import randhie +from statsmodels.graphics.correlation import plot_corr, plot_corr_grid @pytest.mark.matplotlib diff --git a/statsmodels/graphics/tests/test_dotplot.py b/statsmodels/graphics/tests/test_dotplot.py --- a/statsmodels/graphics/tests/test_dotplot.py +++ b/statsmodels/graphics/tests/test_dotplot.py @@ -1,8 +1,8 @@ import numpy as np +import pandas as pd import pytest from statsmodels.graphics.dotplots import dot_plot -import pandas as pd # If true, the output is written to a multi-page pdf file. 
pdf_output = False diff --git a/statsmodels/graphics/tests/test_factorplots.py b/statsmodels/graphics/tests/test_factorplots.py --- a/statsmodels/graphics/tests/test_factorplots.py +++ b/statsmodels/graphics/tests/test_factorplots.py @@ -1,9 +1,9 @@ import numpy as np -from numpy.testing import assert_raises, assert_equal +from numpy.testing import assert_equal, assert_raises from pandas import Series import pytest -from statsmodels.graphics.factorplots import interaction_plot, _recode +from statsmodels.graphics.factorplots import _recode, interaction_plot try: import matplotlib.pyplot as plt diff --git a/statsmodels/graphics/tests/test_gofplots.py b/statsmodels/graphics/tests/test_gofplots.py --- a/statsmodels/graphics/tests/test_gofplots.py +++ b/statsmodels/graphics/tests/test_gofplots.py @@ -647,7 +647,7 @@ def test_correct_labels( ax = fig.get_axes()[0] x_label = ax.get_xlabel() y_label = ax.get_ylabel() - if x_size <= y_size: + if x_size < y_size: if not labels: assert "2nd" in x_label assert "1st" in y_label @@ -661,3 +661,28 @@ def test_correct_labels( else: assert "X" in x_label assert "Y" in y_label + + [email protected] +def test_axis_order(close_figures): + xx = np.random.normal(10, 1, (100,)) + xy = np.random.normal(1, 0.01, (100,)) + fig = qqplot_2samples(xx, xy, "x", "y") + ax = fig.get_axes()[0] + y_range = np.diff(ax.get_ylim())[0] + x_range = np.diff(ax.get_xlim())[0] + assert y_range < x_range + + xx_long = np.random.normal(10, 1, (1000,)) + fig = qqplot_2samples(xx_long, xy, "x", "y") + ax = fig.get_axes()[0] + y_range = np.diff(ax.get_ylim())[0] + x_range = np.diff(ax.get_xlim())[0] + assert y_range < x_range + + xy_long = np.random.normal(1, 0.01, (1000,)) + fig = qqplot_2samples(xx, xy_long, "x", "y") + ax = fig.get_axes()[0] + y_range = np.diff(ax.get_ylim())[0] + x_range = np.diff(ax.get_xlim())[0] + assert x_range < y_range diff --git a/statsmodels/graphics/tests/test_regressionplots.py b/statsmodels/graphics/tests/test_regressionplots.py 
--- a/statsmodels/graphics/tests/test_regressionplots.py +++ b/statsmodels/graphics/tests/test_regressionplots.py @@ -1,13 +1,23 @@ import numpy as np +from numpy.testing import assert_array_less, assert_equal, assert_raises +from pandas import DataFrame, Series import pytest + import statsmodels.api as sm -from numpy.testing import assert_equal, assert_raises, assert_array_less -from statsmodels.graphics.regressionplots import (plot_fit, plot_ccpr, - plot_regress_exog, abline_plot, - plot_partregress_grid, plot_ccpr_grid, add_lowess, - plot_added_variable, plot_partial_residuals, - plot_ceres_residuals, influence_plot, plot_leverage_resid2) -from pandas import Series, DataFrame +from statsmodels.graphics.regressionplots import ( + abline_plot, + add_lowess, + influence_plot, + plot_added_variable, + plot_ccpr, + plot_ccpr_grid, + plot_ceres_residuals, + plot_fit, + plot_leverage_resid2, + plot_partial_residuals, + plot_partregress_grid, + plot_regress_exog, +) try: import matplotlib.pyplot as plt
Mistake in labels of qqplot_2samples #### Describe the bug statsmodels.graphics.gofplots.qqplot_2samples docs have a bug. data1 appears on y-axis and data2 appears on x-axis. This is contrary to what is mentioned in the docs. #### Code Sample, a copy-pastable example if possible ```python import matplotlib.pypot as plt import numpy as np from statsmodels.graphics.gofplots import qqplot_2samples x=np.random.normal(10,1,(100,)) y=np.random.normal(1,.1,(100,)) qqplot_2samples(xx, xy, 'x', 'y') plt.show() ```
"2021-07-30T11:02:33Z"
0.12
[ "statsmodels/graphics/tests/test_regressionplots.py::TestCERESPlot::test_ceres_poisson", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_model", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_ab_ax", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_model_ax", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_remove", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_ab", "statsmodels/graphics/tests/test_regressionplots.py::TestAddedVariablePlot::test_added_variable_poisson", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotPandas::test_plot_fit", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotPandas::test_plot_influence", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotPandas::test_plot_leverage_resid2", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotPandas::test_plot_oth", "statsmodels/graphics/tests/test_regressionplots.py::TestPartialResidualPlot::test_partial_residual_poisson", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_model", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_ab", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_model_ax", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_remove", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_ab_ax", "statsmodels/graphics/tests/test_regressionplots.py::TestPlot::test_plot_leverage_resid2", "statsmodels/graphics/tests/test_regressionplots.py::TestPlot::test_plot_fit", "statsmodels/graphics/tests/test_regressionplots.py::TestPlot::test_plot_oth", "statsmodels/graphics/tests/test_regressionplots.py::TestPlot::test_plot_influence", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_plot_leverage_resid2", 
"statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_plot_oth", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_plot_fit", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_plot_influence", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_one_column_exog", "statsmodels/graphics/tests/test_gofplots.py::TestTopLevel::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestTopLevel::test_qqplot_2samples_arrays", "statsmodels/graphics/tests/test_gofplots.py::TestTopLevel::test_qqplot_2samples_prob_plot_objects", "statsmodels/graphics/tests/test_gofplots.py::TestTopLevel::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_probplot", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalWithFit::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyWithFit::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotLongelyNoFit::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_exceptions", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_loc_set", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot_other_array", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_scale_set", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalFullDist::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::test_qqplot_unequal", "statsmodels/graphics/tests/test_gofplots.py::test_invalid_dist_config", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-30-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-30-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-30-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-50-30-labels1]", 
"statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-30-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_param_unpacking", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-50-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-50-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-50-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-50-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-30-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-50-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-30-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_with_ax", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_baseline", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_plot_qq_line", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_step_baseline", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_step_qq_line", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_step_full_options", "statsmodels/graphics/tests/test_gofplots.py::TestDoPlot::test_plot_full_options", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_fit_params", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalMinimal::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_q_fmt", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_s_fmt_lineoptions", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_non45_no_y", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_45_fmt_lineoptions", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_r_fmt", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_non45_no_x_no_y", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_r", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_s_fmt", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_non45_no_x", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_q_fmt_lineoptions", 
"statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_s", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_badline", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_r_fmt_lineoptions", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_q", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_45", "statsmodels/graphics/tests/test_gofplots.py::TestQQLine::test_45_fmt", "statsmodels/graphics/tests/test_gofplots.py::TestCompareSamplesDifferentSize::test_ppplot", "statsmodels/graphics/tests/test_gofplots.py::TestCompareSamplesDifferentSize::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestCheckDist::test_good", "statsmodels/graphics/tests/test_gofplots.py::TestCheckDist::test_bad", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_lininterp", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_piecewise", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_approx_med_unbiased", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_weibull", "statsmodels/graphics/tests/test_gofplots.py::TestPlottingPosition::test_cunnane", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_loc_set", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_loc_set_in_dist", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_probplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_scale_set_in_dist", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot_pltkwargs", 
"statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_probplot_exceed", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_fit_params", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_probplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_scale_set", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot_other_prbplt", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot_custom_labels", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_qqplot_other_array", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_probplot_pltkwargs", "statsmodels/graphics/tests/test_gofplots.py::TestProbPlotRandomNormalLocScaleDist::test_ppplot", "statsmodels/graphics/tests/test_dotplot.py::test_all", "statsmodels/graphics/tests/test_boxplots.py::test_beanplot_side_right", "statsmodels/graphics/tests/test_boxplots.py::test_violinplot_bw_factor", "statsmodels/graphics/tests/test_boxplots.py::test_beanplot_jitter", "statsmodels/graphics/tests/test_boxplots.py::test_violinplot", "statsmodels/graphics/tests/test_boxplots.py::test_beanplot_side_left", "statsmodels/graphics/tests/test_boxplots.py::test_beanplot", "statsmodels/graphics/tests/test_boxplots.py::test_beanplot_legend_text", "statsmodels/graphics/tests/test_agreement.py::test_mean_diff_plot", "statsmodels/graphics/tests/test_correlation.py::test_plot_corr_grid", 
"statsmodels/graphics/tests/test_correlation.py::test_plot_corr", "statsmodels/graphics/tests/test_factorplots.py::TestInteractionPlot::test_plot_rainbow", "statsmodels/graphics/tests/test_factorplots.py::TestInteractionPlot::test_plot_pandas[int]", "statsmodels/graphics/tests/test_factorplots.py::TestInteractionPlot::test_plot_pandas[str]", "statsmodels/graphics/tests/test_factorplots.py::TestInteractionPlot::test_plot_both", "statsmodels/graphics/tests/test_factorplots.py::TestInteractionPlot::test_recode_series", "statsmodels/graphics/tests/test_factorplots.py::TestInteractionPlot::test_formatting", "statsmodels/graphics/tests/test_factorplots.py::TestInteractionPlot::test_formatting_errors", "statsmodels/graphics/tests/test_factorplots.py::TestInteractionPlot::test_plottype" ]
[ "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-30-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-30-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_axis_order", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-30-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-30-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-30-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[q-30-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-50-50-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-30-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[None-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-50-50-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[r-30-30-labels0]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[s-30-30-labels1]", "statsmodels/graphics/tests/test_gofplots.py::test_correct_labels[45-30-30-labels1]" ]
Python
[]
[]
statsmodels/statsmodels
7,668
statsmodels__statsmodels-7668
[ "7466" ]
b05b9df816a7d0e4adba791543eaa1d2525bb7c3
diff --git a/statsmodels/stats/multitest.py b/statsmodels/stats/multitest.py --- a/statsmodels/stats/multitest.py +++ b/statsmodels/stats/multitest.py @@ -152,7 +152,7 @@ def multipletests(pvals, alpha=0.05, method='hs', is_sorted=False, elif method.lower() in ['s', 'sidak']: reject = pvals <= alphacSidak - pvals_corrected = 1 - np.power((1. - pvals), ntests) + pvals_corrected = -np.expm1(ntests * np.log1p(-pvals)) elif method.lower() in ['hs', 'holm-sidak']: alphacSidak_all = 1 - np.power((1. - alphaf), @@ -170,8 +170,11 @@ def multipletests(pvals, alpha=0.05, method='hs', is_sorted=False, reject = ~notreject del notreject - pvals_corrected_raw = 1 - np.power((1. - pvals), - np.arange(ntests, 0, -1)) + # It's eqivalent to 1 - np.power((1. - pvals), + # np.arange(ntests, 0, -1)) + # but prevents the issue of the floating point precision + pvals_corrected_raw = -np.expm1(np.arange(ntests, 0, -1) * + np.log1p(-pvals)) pvals_corrected = np.maximum.accumulate(pvals_corrected_raw) del pvals_corrected_raw
diff --git a/statsmodels/stats/tests/test_multi.py b/statsmodels/stats/tests/test_multi.py --- a/statsmodels/stats/tests/test_multi.py +++ b/statsmodels/stats/tests/test_multi.py @@ -343,6 +343,14 @@ def test_issorted(method): assert_allclose(res2[0][sortrevind], res1[0], rtol=1e-10) [email protected]('method', sorted(multitest_methods_names)) +def test_floating_precision(method): + # issue #7465 + pvals = np.full(6000, 0.99) + pvals[0] = 1.138569e-56 + assert multipletests(pvals, method=method)[1][0] > 1e-60 + + def test_tukeyhsd(): # example multicomp in R p 83
handle multitests floating point precision issue #7465 - [ ] closes #7465 - [ ] tests added / passed. - [ ] code/documentation is well formatted. - [ ] properly formatted commit message. See [NumPy's guide](https://docs.scipy.org/doc/numpy-1.15.1/dev/gitwash/development_workflow.html#writing-the-commit-message). <details> **Notes**: * It is essential that you add a test when making code changes. Tests are not needed for doc changes. * When adding a new function, test values should usually be verified in another package (e.g., R/SAS/Stata). * When fixing a bug, you must add a test that would produce the bug in main and then show that it is fixed with the new code. * New code additions must be well formatted. Changes should pass flake8. If on Linux or OSX, you can verify you changes are well formatted by running ``` git diff upstream/main -u -- "*.py" | flake8 --diff --isolated ``` assuming `flake8` is installed. This command is also available on Windows using the Windows System for Linux once `flake8` is installed in the local Linux environment. While passing this test is not required, it is good practice and it help improve code quality in `statsmodels`. * Docstring additions must render correctly, including escapes and LaTeX. </details>
"2021-08-24T15:40:32Z"
0.12
[ "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[rawp-val8]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[Hochberg-val3]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[SidakSS-val6]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[BH-val0]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[TSBH_0.05-val7]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[SidakSD-val5]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[BY-val1]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[Holm-val4]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[Bonferroni-val2]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[BH-val0]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[rawp-val8]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[SidakSS-val6]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[BY-val1]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[TSBH_0.05-val7]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[Holm-val4]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[SidakSD-val5]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[Hochberg-val3]", 
"statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[Bonferroni-val2]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[Bonferroni-val2]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[Hochberg-val3]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[BY-val1]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[BH-val0]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[TSBH_0.05-val7]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[SidakSD-val5]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[SidakSS-val6]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[rawp-val8]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[Holm-val4]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-s-0.01]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_bh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[True-True-False]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_issorted[hs]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[True-False-False]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[b]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_gbs]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbky-0.1]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-s-0.1]", 
"statsmodels/stats/tests/test_multi.py::test_floating_precision[sh]", "statsmodels/stats/tests/test_multi.py::test_tukeyhsd", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[True-False-True]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[False-False-True]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-s-0.01]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_tsbky]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_by]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[False-True-False]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-b-0.01]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_bh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_issorted[ho]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-sh-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[False-True-True]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_hommel", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbky-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_issorted[sh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_fdr_bky", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-b-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_by]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_null_distribution", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_tsbh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-h-0.1]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_n-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_tsbky]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-b-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_issorted[b]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbky-0.1]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[ho]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_gbs]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[h]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_local_fdr", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-s-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[False-False-False]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_issorted[h]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-s-0.1]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[True-True-True]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_tsbh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_i-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_issorted[s]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_i-0.01]", 
"statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[SidakSD-val5]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[Bonferroni-val2]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[BH-val0]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[Hochberg-val3]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[TSBH_0.05-val7]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[Holm-val4]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[SidakSS-val6]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[BY-val1]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[rawp-val8]" ]
[ "statsmodels/stats/tests/test_multi.py::test_floating_precision[s]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[hs]" ]
Python
[]
[]
statsmodels/statsmodels
7,670
statsmodels__statsmodels-7670
[ "4952" ]
be535762961792866f2ade882445f46ea716c665
diff --git a/statsmodels/discrete/count_model.py b/statsmodels/discrete/count_model.py --- a/statsmodels/discrete/count_model.py +++ b/statsmodels/discrete/count_model.py @@ -60,11 +60,13 @@ def __init__(self, endog, exog, exog_infl=None, offset=None, if exog_infl is None: self.k_inflate = 1 + self._no_exog_infl = True self.exog_infl = np.ones((endog.size, self.k_inflate), dtype=np.float64) else: self.exog_infl = exog_infl self.k_inflate = exog_infl.shape[1] + self._no_exog_infl = False if len(exog.shape) == 1: self.k_exog = 1 @@ -387,19 +389,35 @@ def predict(self, params, exog=None, exog_infl=None, exposure=None, Notes ----- """ + no_exog = False if exog is None: + no_exog = True exog = self.exog if exog_infl is None: - exog_infl = self.exog_infl + if no_exog: + exog_infl = self.exog_infl + else: + if self._no_exog_infl: + exog_infl = np.ones((len(exog), 1)) + else: + exog_infl = np.asarray(exog_infl) + if exog_infl.ndim == 1 and self.k_inflate == 1: + exog_infl = exog_infl[:, None] if exposure is None: - exposure = getattr(self, 'exposure', 0) + if no_exog: + exposure = getattr(self, 'exposure', 0) + else: + exposure = 0 else: exposure = np.log(exposure) if offset is None: - offset = 0 + if no_exog: + offset = getattr(self, 'offset', 0) + else: + offset = 0 params_infl = params[:self.k_inflate] params_main = params[self.k_inflate:]
diff --git a/statsmodels/discrete/tests/test_count_model.py b/statsmodels/discrete/tests/test_count_model.py --- a/statsmodels/discrete/tests/test_count_model.py +++ b/statsmodels/discrete/tests/test_count_model.py @@ -144,19 +144,18 @@ def test_exposure(self): assert_allclose(res3.params, self.res1.params, atol=1e-6, rtol=1e-6) fitted1 = self.res1.predict() - fitted3 = self.res1.predict() + fitted3 = res3.predict() assert_allclose(fitted3, fitted1, atol=1e-6, rtol=1e-6) ex = model1.exog ex_infl = model1.exog_infl offset = model1.offset fitted1_0 = self.res1.predict(exog=ex, exog_infl=ex_infl, - offset=offset) + offset=offset.tolist()) fitted3_0 = res3.predict(exog=ex, exog_infl=ex_infl, exposure=np.exp(offset)) assert_allclose(fitted3_0, fitted1_0, atol=1e-6, rtol=1e-6) - ex = model1.exog[:10:2] ex_infl = model1.exog_infl[:10:2] offset = offset[:10:2] @@ -175,6 +174,11 @@ def test_exposure(self): assert_allclose(fitted1_2, fitted1[:10:2], atol=1e-6, rtol=1e-6) assert_allclose(fitted3_2, fitted1[:10:2], atol=1e-6, rtol=1e-6) + # without specifying offset and exposure + fitted1_3 = self.res1.predict(exog=ex, exog_infl=ex_infl) + fitted3_3 = res3.predict(exog=ex, exog_infl=ex_infl) + assert_allclose(fitted3_3, fitted1_3, atol=1e-6, rtol=1e-6) + class TestZeroInflatedModelPandas(CheckGeneric): @classmethod @@ -276,6 +280,18 @@ def test_predict_prob(self): res.predict(), 0.05).T assert_allclose(pr, pr2, rtol=0.05, atol=0.05) + def test_predict_options(self): + # check default exog_infl, see #4757 + res = self.res + n = 5 + pr1 = res.predict(which='prob') + pr0 = res.predict(exog=res.model.exog[:n], which='prob') + assert_allclose(pr0, pr1[:n], rtol=1e-10) + + fitted1 = res.predict() + fitted0 = res.predict(exog=res.model.exog[:n]) + assert_allclose(fitted0, fitted1[:n], rtol=1e-10) + @pytest.mark.slow class TestZeroInflatedGeneralizedPoisson(CheckGeneric):
Zero-Inflated Poisson/Negative Binomial: not predicting for arrays of different size? **edit** same bug as #4757 Hi, I am modelling a discrete skewed distribution with several of the available methods in `statsmodels`, namely: `Poisson`, `Negative Binomial`, `Zero Inflated Poisson`, and `Zero Inflated Negative Binomial`. What I am doing is training with 60% of my samples, and testing the model with the remaining 40%. For this 40%, I obtain the RMSE to have a performance metric for each method. There are no problems with the Poisson and the Negative Binomial: I use the `.fit()` method on my training samples, and the `.predict()` method on the test samples. However, I observed that for the Zero-Inflated models, **I cannot predict for the testing set**, as you can see in this snippet: ``` print("Model: Zero Inflated Poisson") zip_mod = sm.ZeroInflatedPoisson(ytrain, xtrain).fit(method="newton", maxiter=50) **zip_mean_pred = zip_mod.predict(xtest)** zip_ppf_obs = stats.poisson.ppf(q=0.95, mu=zip_mean_pred) zip_rmse = np.sqrt(mean_squared_error(ytest, zip_ppf_obs)) print("RMSE Zero-Inflated Poisson", zip_rmse) print("Model: Zero Inflated Neg. 
Binomial") zinb_mod = sm.ZeroInflatedNegativeBinomialP(ytrain, xtrain).fit(method="newton", maxiter=50) zinb_pred = zinb_mod.predict(xtest) zinb_rmse = np.sqrt(mean_squared_error(ytest, zinb_pred)) print("RMSE Zero-Inflated Negative Binomial: ", zinb_rmse) ``` The error that I get says: ``` Traceback (most recent call last): File "/home/irene/.../four_models.py", line 569, in <module> test_poi_nb_zip_zinb_tiny_subset(meta_m, m) File "/home/irene/.../four_models.py", line 325, in test_poi_nb_zip_zinb_tiny_subset **zinb_pred = zinb_mod.predict(xtest)** File "/home/irene/.conda/envs/defaultpy34/lib/python2.7/site-packages/statsmodels/base/model.py", line 854, in predict **kwargs) File "/home/irene/.conda/envs/defaultpy34/lib/python2.7/site-packages/statsmodels/discrete/count_model.py", line 444, in predict prob_zero = (1 - prob_main) + prob_main * np.exp(llf) **ValueError: operands could not be broadcast together with shapes (480,) (320,)** ``` Perhaps I am doing something wrong, but I do not understand why these two methods require a test sample for the predict method as big as the one used in the fitting. Is there any way to pass to these methods a test array with a different size than the training? I tried training and testing with the same dataset and it works. Thanks a lot for all the work! :-)
I forgot to mention that the same procedure that you see here for the zero inflated models is applied above it to the normal poisson and negative binomial with not problem. Thanks. Hi, there is a good chance that this is a bug. Given that the models are new, there might be cases that are not included in the test suite and that could be buggy. Can you make a reproducible example? It can have random number, just so I can reproduce the shape mismatch. (temporarily prio-high, at least until I know what's going on.) Based on a quick look at the source. If exog_infl is not specified in predict, then the one of the model is used which will conflict with the predict exog. Try to specify `exog_infl`. In the case of constant inflation probability as in your case, you need `exog_infl=np.ones(len(exog_predict))` the same is true for `exposure`, which also uses the model exposure if it is available but no predict exposure is specified. Thanks for the quick response! I made the following dummy code causing the error I was mentioning, I hope it is useful to trace the bug in the code. ``` import numpy as np import statsmodels.api as sm from scipy import stats from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split X = np.random.randint(99, size=(800, 21)) Y = np.random.randint(2, size=(800, 1)) xtrain, xtest, ytrain, ytest = train_test_split(X, Y, train_size=0.60, random_state=42) print(xtrain.shape, ytrain.shape, xtest.shape, ytest.shape) print("Model: Zero Inflated Poisson") zip_mod = sm.ZeroInflatedPoisson(ytrain, xtrain).fit(method="newton", maxiter=50) zip_mean_pred = zip_mod.predict(xtest) zip_ppf_obs = stats.poisson.ppf(q=0.95, mu=zip_mean_pred) zip_rmse = np.sqrt(mean_squared_error(ytest, zip_ppf_obs)) print("Model: Zero Inflated Neg. 
Binomial") zinb_mod = sm.ZeroInflatedNegativeBinomialP(ytrain, xtrain).fit(method="newton", maxiter=50) zinb_pred = zinb_mod.predict(xtest) zinb_rmse = np.sqrt(mean_squared_error(ytest, zinb_pred)) print("RMSE ZIP", zip_rmse) print("RMSE ZINB: ", zinb_rmse) ``` If I add to the `.predict()` methods the `exog_infl` as follows: ``` zip_mean_pred = zip_mod.predict(xtest, exog_infl=np.ones(len(xtest))) zinb_pred = zinb_mod.predict(xtest, exog_infl=np.ones(len(xtest))) ``` I get the following error: ``` Traceback (most recent call last): File "/home/irene/.../test_zip_zinb.py", line 29, in <module> zip_mean_pred = zip_mod.predict(xtest, exog_infl=np.ones(len(xtest))) File "/home/irene/.conda/envs/defaultpy34/lib/python2.7/site-packages/statsmodels/base/model.py", line 854, in predict **kwargs) File "/home/irene/.conda/envs/defaultpy34/lib/python2.7/site-packages/statsmodels/discrete/count_model.py", line 415, in predict prob_main = 1 - self.model_infl.predict(params_infl, exog_infl) File "/home/irene/.conda/envs/defaultpy34/lib/python2.7/site-packages/statsmodels/discrete/discrete_model.py", line 447, in predict return self.cdf(np.dot(exog, params)) ValueError: shapes (320,) and (1,) not aligned: 320 (dim 0) != 1 (dim 0) ``` The `.shape` of the `np.ones(len(xtest))` is `(320,)` just in case it is important. I just tried it. exog_infl in predict needs to be 2dim `zip_mean_pred = zip_mod.predict(xtest, exog_infl=np.ones((len(xtest), 1)))` this works, but the example has convergence problems or hessian inversion problem if I use method='nm' as optimizer. (The data doesn't generate anything close to ZIP or ZINB) general aside: Estimation of a NegativBinomial(P) model and ZI variant **needs** overdispersion relative to Poisson in the data. If there is no overdispersion, then the loglike and derivative computation end with a zero division. Poisson is the limit of NegativeBinomial as overdispersion disappears, but computationally we need to stay away from that boundary case. 
GeneralizedPoisson and it's ZI version allow for overdispersion, no excess dispersion and a small amount of underdispersion, i.e. the Poisson special case is in the interior of the GP parameter space. (In the zi version excess dispersion refers to the non-zero inflated part.) I tried your fix and it works! ``` print("Model: Zero Inflated Poisson") zip_mod = sm.ZeroInflatedPoisson(ytrain, xtrain).fit(method="nm", maxiter=50) zip_mean_pred = zip_mod.predict(xtest, exog_infl=np.ones((len(xtest), 1))) zip_ppf_obs = stats.poisson.ppf(q=0.95, mu=zip_mean_pred) zip_rmse = np.sqrt(mean_squared_error(ytest, zip_ppf_obs)) print("Model: Zero Inflated Neg. Binomial") zinb_mod = sm.ZeroInflatedNegativeBinomialP(ytrain, xtrain).fit(method="nm", maxiter=50) zinb_pred = zinb_mod.predict(xtest, exog_infl=np.ones((len(xtest), 1))) zinb_rmse = np.sqrt(mean_squared_error(ytest, zinb_pred)) ``` It gives some warnings as you said, like this: ``` HessianInversionWarning: Inverting hessian failed, no bse or cov_params available 'available', HessianInversionWarning) ``` But it processes my testing dataset. Tried the "nm" and "newton" optimizers. So now i can compute the RMSE: ``` RMSE ZIP: 1.5642490210960658 RMSE ZINB: 0.50817991749367819 ``` Thanks for the super fast reaction, much appreciated! PS - Regarding this "overdispersion", is there any paper/manual you recommend to learn more about it? :-) > PS - Regarding this "overdispersion", is there any paper/manual you recommend to learn more about it? If you do a google search for "overdispersion" or "overdispersion poisson", then you will find some introductory explanation, e.g. I never read this http://biometry.github.io/APES//LectureNotes/2016-JAGS/Overdispersion/OverdispersionJAGS.html Zero-inflation can cause overdispersion if we just estimate a poisson model. If we estimate a zero-inflated model, then the zero inflation is handled separately but there can still be a (over-, under-) dispersion issue for the not inflated part. 
I don't have directly a reference because I haven't looked at it recently, and most of my references are more technical for specific issues (e.g. what we don't have yet is testing zero-inflated Poisson versus zero-inflated NegativeBinomial) Here are links to two of my notebooks which are a mix of something I started to write for "public" consumption, and then often ended up investigating some specific problems. Those were written when I reviewed and finished up the count model PR and I'm not sure those notebooks are up to date with current statsmodels. https://gist.github.com/josef-pkt/c932904296270d75366a24ee92a4eb2f https://gist.github.com/josef-pkt/b528c099592bdb0239e6e750d365f7fc
"2021-08-24T23:30:19Z"
0.12
[ "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_logit::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict::test_mean", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict::test_var", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict::test_predict_prob", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict::test_predict_generic_zi", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_names", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_conf_int", 
"statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModelPandas::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_probit::test_init_keys", 
"statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_minimize", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_aic", 
"statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP::test_minimize", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_params", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_summary", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_fit_regularized", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_null", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_t", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_init_keys", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_llf", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_bse", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_conf_int", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_bic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_aic", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict2::test_mean", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedNegativeBinomialP_predict2::test_zero_nonzero_mean", 
"statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson_predict::test_mean", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson_predict::test_var", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedGeneralizedPoisson_predict::test_predict_prob", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedPoisson_predict::test_mean", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedPoisson_predict::test_var", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedPoisson_predict::test_predict_prob", "statsmodels/discrete/tests/test_count_model.py::TestPandasOffset::test_pd_offset_exposure" ]
[ "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedModel_offset::test_exposure", "statsmodels/discrete/tests/test_count_model.py::TestZeroInflatedPoisson_predict::test_predict_options" ]
Python
[]
[]
statsmodels/statsmodels
7,673
statsmodels__statsmodels-7673
[ "7672" ]
b2397a21cd9ffcf1f52593380816d3bdfd8a7f0d
diff --git a/statsmodels/graphics/regressionplots.py b/statsmodels/graphics/regressionplots.py --- a/statsmodels/graphics/regressionplots.py +++ b/statsmodels/graphics/regressionplots.py @@ -301,7 +301,7 @@ def _partial_regression(endog, exog_i, exog_others): def plot_partregress(endog, exog_i, exog_others, data=None, title_kwargs={}, obs_labels=True, label_kwargs={}, - ax=None, ret_coords=False, **kwargs): + ax=None, ret_coords=False, eval_env=1, **kwargs): """Plot partial regression for a single regressor. Parameters @@ -337,6 +337,9 @@ def plot_partregress(endog, exog_i, exog_others, data=None, ret_coords : bool If True will return the coordinates of the points in the plot. You can use this to add your own annotations. + eval_env : int + Patsy eval environment if user functions and formulas are used in + defining endog or exog. **kwargs The keyword arguments passed to plot for the points. @@ -384,16 +387,16 @@ def plot_partregress(endog, exog_i, exog_others, data=None, #NOTE: there is no interaction between possible missing data and #obs_labels yet, so this will need to be tweaked a bit for this case fig, ax = utils.create_mpl_ax(ax) - + print("eval_env:", eval_env) # strings, use patsy to transform to data if isinstance(endog, str): - endog = dmatrix(endog + "-1", data) + endog = dmatrix(endog + "-1", data, eval_env=eval_env) if isinstance(exog_others, str): - RHS = dmatrix(exog_others, data) + RHS = dmatrix(exog_others, data, eval_env=eval_env) elif isinstance(exog_others, list): RHS = "+".join(exog_others) - RHS = dmatrix(RHS, data) + RHS = dmatrix(RHS, data, eval_env=eval_env) else: RHS = exog_others RHS_isemtpy = False @@ -402,7 +405,7 @@ def plot_partregress(endog, exog_i, exog_others, data=None, elif isinstance(RHS, pd.DataFrame) and RHS.empty: RHS_isemtpy = True if isinstance(exog_i, str): - exog_i = dmatrix(exog_i + "-1", data) + exog_i = dmatrix(exog_i + "-1", data, eval_env=eval_env) # all arrays or pandas-like
diff --git a/statsmodels/graphics/tests/test_regressionplots.py b/statsmodels/graphics/tests/test_regressionplots.py --- a/statsmodels/graphics/tests/test_regressionplots.py +++ b/statsmodels/graphics/tests/test_regressionplots.py @@ -348,3 +348,25 @@ def test_ceres_poisson(self, close_figures): ax.set_title(ti + "\nPoisson regression\n" + effect_str) close_or_save(pdf, fig) + + [email protected] +def test_partregress_formula_env(): + # test that user function in formulas work, see #7672 + + @np.vectorize + def lg(x): + return np.log10(x) if x > 0 else 0 + + df = DataFrame( + dict( + a=np.random.random(size=10), + b=np.random.random(size=10), + c=np.random.random(size=10), + ) + ) + sm.graphics.plot_partregress( + "a", "lg(b)", ["c"], obs_labels=False, data=df, eval_env=1) + + sm.graphics.plot_partregress( + "a", "lg(b)", ["c"], obs_labels=False, data=df)
PatsyError with custom function when trying to plot partial regression #### Describe the bug I am experiencing an `PatsyError` where `patsy` dose not recognize the custom function `lg()` in the name space. This happened with the `statsmodels.api.graphics.plot_partregress()` function. #### Code Sample, a copy-pastable example if possible ```python import statsmodels import statsmodels.api as sm import patsy from patsy import dmatrix, dmatrices import pandas as pd import numpy as np @np.vectorize def lg(x): return np.log10(x) if x > 0 else 0 df = pd.DataFrame( dict( a=np.random.random(size=10), b=np.random.random(size=10), c=np.random.random(size=10), ) ) sm.graphics.plot_partregress( "a", "lg(b)", ["c"], obs_labels=False, data=df ) ``` <details> I tried to add kwarg `eval_env=1` (as well as 2 and 3), but the error remains. ``` python --------------------------------------------------------------------------- NameError Traceback (most recent call last) /usr/local/lib/python3.7/dist-packages/patsy/compat.py in call_and_wrap_exc(msg, origin, f, *args, **kwargs) 35 try: ---> 36 return f(*args, **kwargs) 37 except Exception as e: 11 frames /usr/local/lib/python3.7/dist-packages/patsy/eval.py in eval(self, expr, source_name, inner_namespace) 165 return eval(code, {}, VarLookupDict([inner_namespace] --> 166 + self._namespaces)) 167 <string> in <module>() NameError: name 'lg' is not defined The above exception was the direct cause of the following exception: PatsyError Traceback (most recent call last) <ipython-input-11-d075df7a2fe3> in <module>() 19 20 sm.graphics.plot_partregress( ---> 21 "a", "lg(b)", ["c"], obs_labels=False, data=df, eval_env=1 22 ) /usr/local/lib/python3.7/dist-packages/statsmodels/graphics/regressionplots.py in plot_partregress(endog, exog_i, exog_others, data, title_kwargs, obs_labels, label_kwargs, ax, ret_coords, **kwargs) 402 RHS_isemtpy = True 403 if isinstance(exog_i, str): --> 404 exog_i = dmatrix(exog_i + "-1", data) 405 406 # all arrays or 
pandas-like /usr/local/lib/python3.7/dist-packages/patsy/highlevel.py in dmatrix(formula_like, data, eval_env, NA_action, return_type) 289 eval_env = EvalEnvironment.capture(eval_env, reference=1) 290 (lhs, rhs) = _do_highlevel_design(formula_like, data, eval_env, --> 291 NA_action, return_type) 292 if lhs.shape[1] != 0: 293 raise PatsyError("encountered outcome variables for a model " /usr/local/lib/python3.7/dist-packages/patsy/highlevel.py in _do_highlevel_design(formula_like, data, eval_env, NA_action, return_type) 163 return iter([data]) 164 design_infos = _try_incr_builders(formula_like, data_iter_maker, eval_env, --> 165 NA_action) 166 if design_infos is not None: 167 return build_design_matrices(design_infos, data, /usr/local/lib/python3.7/dist-packages/patsy/highlevel.py in _try_incr_builders(formula_like, data_iter_maker, eval_env, NA_action) 68 data_iter_maker, 69 eval_env, ---> 70 NA_action) 71 else: 72 return None /usr/local/lib/python3.7/dist-packages/patsy/build.py in design_matrix_builders(termlists, data_iter_maker, eval_env, NA_action) 694 factor_states, 695 data_iter_maker, --> 696 NA_action) 697 # Now we need the factor infos, which encapsulate the knowledge of 698 # how to turn any given factor into a chunk of data: /usr/local/lib/python3.7/dist-packages/patsy/build.py in _examine_factor_types(factors, factor_states, data_iter_maker, NA_action) 441 for data in data_iter_maker(): 442 for factor in list(examine_needed): --> 443 value = factor.eval(factor_states[factor], data) 444 if factor in cat_sniffers or guess_categorical(value): 445 if factor not in cat_sniffers: /usr/local/lib/python3.7/dist-packages/patsy/eval.py in eval(self, memorize_state, data) 564 return self._eval(memorize_state["eval_code"], 565 memorize_state, --> 566 data) 567 568 __getstate__ = no_pickling /usr/local/lib/python3.7/dist-packages/patsy/eval.py in _eval(self, code, memorize_state, data) 549 memorize_state["eval_env"].eval, 550 code, --> 551 
inner_namespace=inner_namespace) 552 553 def memorize_chunk(self, state, which_pass, data): /usr/local/lib/python3.7/dist-packages/patsy/compat.py in call_and_wrap_exc(msg, origin, f, *args, **kwargs) 41 origin) 42 # Use 'exec' to hide this syntax from the Python 2 parser: ---> 43 exec("raise new_exc from e") 44 else: 45 # In python 2, we just let the original exception escape -- better /usr/local/lib/python3.7/dist-packages/patsy/compat.py in <module>() PatsyError: Error evaluating factor: NameError: name 'lg' is not defined lg(b)-1 ^^^^^ ``` </details> #### Expected Output I expect the `lg()` function can be recognized by `patsy`. #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.7.11.final.0 OS: Linux 5.4.104+ #1 SMP Sat Jun 5 09:50:34 PDT 2021 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.12.2 (/usr/local/lib/python3.7/dist-packages/statsmodels) Required Dependencies ===================== cython: 0.29.24 (/usr/local/lib/python3.7/dist-packages/Cython) numpy: 1.19.5 (/usr/local/lib/python3.7/dist-packages/numpy) scipy: 1.4.1 (/usr/local/lib/python3.7/dist-packages/scipy) pandas: 1.1.5 (/usr/local/lib/python3.7/dist-packages/pandas) dateutil: 2.8.2 (/usr/local/lib/python3.7/dist-packages/dateutil) patsy: 0.5.1 (/usr/local/lib/python3.7/dist-packages/patsy) Optional Dependencies ===================== matplotlib: 3.2.2 (/usr/local/lib/python3.7/dist-packages/matplotlib) backend: module://ipykernel.pylab.backend_inline cvxopt: 1.2.6 (/usr/local/lib/python3.7/dist-packages/cvxopt) joblib: 1.0.1 (/usr/local/lib/python3.7/dist-packages/joblib) Developer Tools ================ IPython: 5.5.0 (/usr/local/lib/python3.7/dist-packages/IPython) jinja2: 2.11.3 (/usr/local/lib/python3.7/dist-packages/jinja2) sphinx: 1.8.5 (/usr/local/lib/python3.7/dist-packages/sphinx) pygments: 2.6.1 (/usr/local/lib/python3.7/dist-packages/pygments) pytest: 3.6.4 
(/usr/local/lib/python3.7/dist-packages) virtualenv: Not installed </details>
Does not work for me either. I don't understand why adding `eval_env` to the dmatrix calls does not help. :( (I'm running the example in a notebook.) I still have no idea why patsy doesn't find it. However, putting the call to plot_partialregress into a unit test function and eval_env=1, works with a pytest run of the unit test Also putting the code in a function in the notebook works ``` def plot_pr(): @np.vectorize def lg(x): return np.log10(x) if x > 0 else 0 fig = sm.graphics.plot_partregress( "a", "lg(b)", ["c"], obs_labels=False, data=df, eval_env=1 ) return fig plot_pr(); ``` But I have no idea why a plain script (or notebook) doesn't work. another version that I managed to get to work is to put `lg` inside the np namespace ``` np.lg = lg sm.graphics.plot_partregress( "a", "np.lg(b)", ["c"], obs_labels=False, data=df, eval_env=1 ) ``` (I'm using python 3.9.2 ) I still have no idea why patsy doesn't find it. However, putting the call to plot_partialregress into a unit test function and eval_env=1, works with a pytest run of the unit test Also putting the code in a function in the notebook works ``` def plot_pr(): @np.vectorize def lg(x): return np.log10(x) if x > 0 else 0 fig = sm.graphics.plot_partregress( "a", "lg(b)", ["c"], obs_labels=False, data=df, eval_env=1 ) return fig plot_pr(); ``` But I have no idea why a plain script (or notebook) doesn't work. another version that I managed to get to work is to put `lg` inside the np namespace ``` np.lg = lg sm.graphics.plot_partregress( "a", "np.lg(b)", ["c"], obs_labels=False, data=df, eval_env=1 ) ``` (I'm using python 3.9.2 ) The easiest workaround is to put the transformed variable into a dataframe column. Thanks @josef-pkt for the effort and the 2 possible workarounds. I didn't know that you can assign a function into `numpy`'s name space. trying again today, the eval_env option also works in notebook directly. (not sure what was going on) PR #7673
"2021-08-28T17:26:24Z"
0.12
[ "statsmodels/graphics/tests/test_regressionplots.py::TestCERESPlot::test_ceres_poisson", "statsmodels/graphics/tests/test_regressionplots.py::TestPartialResidualPlot::test_partial_residual_poisson", "statsmodels/graphics/tests/test_regressionplots.py::TestPlot::test_plot_fit", "statsmodels/graphics/tests/test_regressionplots.py::TestPlot::test_plot_leverage_resid2", "statsmodels/graphics/tests/test_regressionplots.py::TestPlot::test_plot_oth", "statsmodels/graphics/tests/test_regressionplots.py::TestPlot::test_plot_influence", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotPandas::test_plot_oth", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotPandas::test_plot_influence", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotPandas::test_plot_leverage_resid2", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotPandas::test_plot_fit", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_ab_ax", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_model", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_ab", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_remove", "statsmodels/graphics/tests/test_regressionplots.py::TestABLine::test_abline_model_ax", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_ab", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_remove", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_model", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_model_ax", "statsmodels/graphics/tests/test_regressionplots.py::TestABLinePandas::test_abline_ab_ax", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_plot_oth", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_plot_influence", 
"statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_plot_fit", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_plot_leverage_resid2", "statsmodels/graphics/tests/test_regressionplots.py::TestPlotFormula::test_one_column_exog", "statsmodels/graphics/tests/test_regressionplots.py::TestAddedVariablePlot::test_added_variable_poisson" ]
[ "statsmodels/graphics/tests/test_regressionplots.py::test_partregress_formula_env" ]
Python
[]
[]
statsmodels/statsmodels
7,693
statsmodels__statsmodels-7693
[ "6577" ]
c1ecba96eea169f950b2245c99e368d75c076429
diff --git a/statsmodels/genmod/generalized_linear_model.py b/statsmodels/genmod/generalized_linear_model.py --- a/statsmodels/genmod/generalized_linear_model.py +++ b/statsmodels/genmod/generalized_linear_model.py @@ -1768,7 +1768,7 @@ def aic(self): Akaike Information Criterion -2 * `llf` + 2 * (`df_model` + 1) """ - return -2 * self.llf + 2 * (self.df_model + 1) + return self.info_criteria("aic") @property def bic(self): @@ -1826,11 +1826,9 @@ def bic_llf(self): Based on the log-likelihood, -2 * `llf` + log(n) * (`df_model` + 1) """ - return -2*self.llf + (self.df_model+1)*np.log( - self.df_model+self.df_resid+1 - ) + return self.info_criteria("bic") - def info_criteria(self, crit, scale=None): + def info_criteria(self, crit, scale=None, dk_params=0): """Return an information criterion for the model. Parameters @@ -1840,6 +1838,11 @@ def info_criteria(self, crit, scale=None): scale : float The scale parameter estimated using the parent model, used only for qaic. + dk_params : int or float + Correction to the number of parameters used in the information + criterion. By default, only mean parameters are included, the + scale parameter is not included in the parameter count. + Use ``dk_params=1`` to include scale in the parameter count. Returns the given information criterion value. @@ -1860,13 +1863,15 @@ def info_criteria(self, crit, scale=None): Burnham KP, Anderson KR (2002). Model Selection and Multimodel Inference; Springer New York. """ - crit = crit.lower() + k_params = self.df_model + 1 + dk_params if crit == "aic": - return self.aic + return -2 * self.llf + 2 * k_params elif crit == "bic": - return self.bic + nobs = self.df_model + self.df_resid + 1 + bic = -2*self.llf + k_params*np.log(nobs) + return bic elif crit == "qaic": f = self.model.family fl = (families.Poisson, families.NegativeBinomial, @@ -1876,7 +1881,7 @@ def info_criteria(self, crit, scale=None): msg += "Negative Binomial families." 
warnings.warn(msg) llf = self.llf_scaled(scale=1) - return -2 * llf/scale + 2 * (self.df_model + 1) + return -2 * llf/scale + 2 * k_params @Appender(pred.get_prediction_glm.__doc__) def get_prediction(self, exog=None, exposure=None, offset=None, diff --git a/statsmodels/regression/linear_model.py b/statsmodels/regression/linear_model.py --- a/statsmodels/regression/linear_model.py +++ b/statsmodels/regression/linear_model.py @@ -1826,7 +1826,7 @@ def aic(self): For a model with a constant :math:`-2llf + 2(df\_model + 1)`. For a model without a constant :math:`-2llf + 2(df\_model)`. """ - return -2 * self.llf + 2 * (self.df_model + self.k_constant) + return self.info_criteria("aic") @cache_readonly def bic(self): @@ -1836,8 +1836,42 @@ def bic(self): For a model with a constant :math:`-2llf + \log(n)(df\_model+1)`. For a model without a constant :math:`-2llf + \log(n)(df\_model)`. """ - return (-2 * self.llf + np.log(self.nobs) * (self.df_model + - self.k_constant)) + return self.info_criteria("bic") + + def info_criteria(self, crit, dk_params=0): + """Return an information criterion for the model. + + Parameters + ---------- + crit : string + One of 'aic', 'bic', 'aicc' or 'hqic'. + dk_params : int or float + Correction to the number of parameters used in the information + criterion. By default, only mean parameters are included, the + scale parameter is not included in the parameter count. + Use ``dk_params=1`` to include scale in the parameter count. + + Returns the given information criterion value. + + References + ---------- + Burnham KP, Anderson KR (2002). Model Selection and Multimodel + Inference; Springer New York. 
+ """ + crit = crit.lower() + k_params = self.df_model + self.k_constant + dk_params + + if crit == "aic": + return -2 * self.llf + 2 * k_params + elif crit == "bic": + bic = -2*self.llf + np.log(self.nobs) * k_params + return bic + elif crit == "aicc": + from statsmodels.tools.eval_measures import aicc + return aicc(self.llf, self.nobs, k_params) + elif crit == "hqic": + from statsmodels.tools.eval_measures import hqic + return hqic(self.llf, self.nobs, k_params) @cache_readonly def eigenvals(self): diff --git a/statsmodels/regression/rolling.py b/statsmodels/regression/rolling.py --- a/statsmodels/regression/rolling.py +++ b/statsmodels/regression/rolling.py @@ -525,6 +525,10 @@ def bic(self): with np.errstate(divide="ignore"): return self._wrap(RegressionResults.bic.func(self)) + def info_criteria(self, crit, dk_params=0): + return self._wrap(RegressionResults.info_criteria( + self, crit, dk_params=dk_params)) + @cache_readonly def params(self): """Estimated model parameters"""
diff --git a/statsmodels/genmod/tests/test_glm.py b/statsmodels/genmod/tests/test_glm.py --- a/statsmodels/genmod/tests/test_glm.py +++ b/statsmodels/genmod/tests/test_glm.py @@ -2540,6 +2540,7 @@ def test_output_exposure_null(reset_randomstate): # Check that they are different assert np.abs(null_model_without_exposure.llf - model.llnull) > 1 + def test_qaic(): # Example from documentation of R package MuMIn @@ -2562,7 +2563,11 @@ def test_qaic(): # presumably because they count the scale parameter in df. # This won't matter when comparing models by differencing # QAICs. + # Binomial doesn't have a scale parameter, so adding +1 is not correct. assert_allclose(qaic, 29.13266, rtol=1e-5, atol=1e-5) + qaic1 = r.info_criteria(crit="qaic", scale=scale, dk_params=1) + assert_allclose(qaic1, 31.13266, rtol=1e-5, atol=1e-5) + def test_tweedie_score(): diff --git a/statsmodels/regression/tests/test_regression.py b/statsmodels/regression/tests/test_regression.py --- a/statsmodels/regression/tests/test_regression.py +++ b/statsmodels/regression/tests/test_regression.py @@ -173,6 +173,15 @@ def test_loglike(self): def test_aic(self): assert_almost_equal(self.res1.aic, self.res2.aic, self.decimal_aic) + # the following just checks the definition + aicc1 = self.res1.info_criteria("aicc") + k = self.res1.df_model + self.res1.model.k_constant + nobs = self.res1.model.nobs + aicc2 = self.res1.aic + 2 * (k**2 + k) / (nobs - k - 1) + assert_allclose(aicc1, aicc2, rtol=1e-10) + hqic1 = self.res1.info_criteria("hqic") + hqic2 = (self.res1.aic - 2 * k) + 2 * np.log(np.log(nobs)) * k + assert_allclose(hqic1, hqic2, rtol=1e-10) decimal_bic = DECIMAL_4
AIC value inconsistent with R #### Describe the bug The definition of AIC in linear regression (sm.api.OLS()) does not match that of R. This is due to the fact that the number of parameters is defined as (df_model + 1), which does not take into account the variance term that is also estimated from the data. (This is why this bug happens in OLS but not in case of GLM.) I think this should have been (df_model + 2). This bug actually does not change rankings of models because the +1 term vanishes when comparing models, but the definition is conceptually wrong and would potentially cause incompatibilities with some of R functions. #### Code Sample, a copy-pastable example if possible import statsmodels.api as sm from sklearn.datasets import load_iris from numpy import c_, ones, log data = load_iris() X = c_[ones(data['data'].shape[0]), data['data'][:, 1]] y = data['data'][:, 0] n = data['data'].shape[0] model = sm.OLS(y, X) fit = model.fit() print(fit.aic) # gives 369.9916713254629 print(fit.bic) # gives 376.0129419136554 print(-2*fit.llf + 3*2) # R AIC: 371.9916713254629 print(-2*fit.llf + 3*log(n)) # R BIC: 381.02357720775166 ```python # Your code here that produces the bug # This example should be self-contained, and so not rely on external data. # It should run in a fresh ipython session, and so include all relevant imports. ``` <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `master`. If your problem has been fixed in an unreleased version, you might be able to use `master` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the master branch of this repository? 
It helps the limited resources if we know problems exist in the current master so that they do not need to check whether the code sample produces a bug in the next release. </details> If the issue has not been resolved, please file it in the issue tracker. #### Expected Output A clear and concise description of what you expected to happen. #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> [paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line] </details>
same issue for BIC #1802 no decision to change it yet Label this as a FAQ. I think we are past the point where this can change. Still showing in 0.12 If it's wrong, could it be corrected? It's not just a diagnostic: if you're using the AIC/BIC to compare models, it can have a material effect. It does _not_ affect the relative rank of models with the same IC which is how IC are used in model selection. The plan is to add an `infocrit` method to results classes that has options. GLM also does not count an estimated scale or dispersion parameter in the ic. In OLS (and GLM), the parameter estimates and fit in terms of sum of squares, or residual measures is independent of a scale estimate. So there is not necessarily a reason to include the scale estimate when comparing models with different exog. But we do want consistent definitions across models, so we can us the ic for choosing among those, e.g. count models with different distributional assumptions. Just a note that state space models have an `info_criteria` method rather than an `infocrit` method. @ChadFulton Thanks, then we can coordinate on that. I haven't looked at tsa in some time.) @ChadFulton Is scale included in the count of parameters for ic in tsa and statespace? > Is scale included in the count of parameters for ic in tsa and statespace? TL;DR: `SARIMAX`, `ARIMA`, and `AutoReg` all include the scale in the definition of AIC, while `VAR` does not. -------- Scale is included in `df_model` and in the information criteria computations for state space models (e.g. `SARIMAX`). This is true both when it is estimated as part of the overall parameter vector and when we use the version where we first concentrate it out of the likelihood. I took a look at the old `ARIMA` model, and there it is also included in the information criteria. 
This model always estimates the scale by concentrating it out, however, `df_model` does not include report it (so the information criteria computations use the private `_ic_df_model`. The IC for both `AutoReg` and `VAR` are based on Lutkepohl's definition, and they (a) do not include the intercept terms, (b) drops the constant term, and (c) divide through by `nobs` (in fact, [Stata's `varsoc`](https://www.stata.com/manuals13/tsvarsoc.pdf) divides even its standard AIC through by `nobs`). However, `AutoReg` does include the scale term as a parameter (different from Lutkepohl's formulation) while `VAR` does not include the free terms in the error covariance matrix (the same as Lutkepohl's formulation). -------- One other thing that is interesting / annoying: the way that Lutkepohl forms the AIC by concentrating out the error covariance matrix implies that the constant term that is dropped is of the form `k_endog * (np.log(2 * np.pi) + 1)`, while in other cases (such as R's `forecast` package, if I remember correctly), the constant term that is dropped is of the form `k_endog * np.log(2 * np.pi)`. (Where, again, either of these cases may or may not also be multipled by `nobs`, depending on the definition used). @ChadFulton Thanks for the overview sounds also pretty messy outside full MLE One problem is that in the least squares and quasi-likelihood version we don't need a fully specified likelihood and variance/scale estimate are in most of those cases orthogonal to the mean parameter estimates. The traditional sum of squares, Sigma definition of aic divides by nobs https://www.statsmodels.org/devel/generated/statsmodels.tools.eval_measures.aic_sigma.html I'm only part way to understanding more of the IC for not fully specified or misspecified likelihood cases. e.g. TIC and GIC #6516 I still need to see how this works out in various models with specified or un-/misspecified likelihood function. 
Adding options for various definitions of IC pushes the decision which to use to the user :) As Kevin pointed out, any constant term is irrelevant for specification decisions within a model. But if we have different models or different distributional assumptions for the same underlying modelling task, then we need consistent definitions, whatever they are. Another issue: What is relevant in the IC might also depend on the target of the estimation. In OLS and similar we are interested in the mean function and we might not care about the exact distribution of the residuals. So scale and higher order properties of OLS or Poisson model are not of interest. If we are interested in a correctly specified distribution, then we don't want to ignore those extra features of the distribution. #7142 (I guess a tsa example would be an ARMA-GARCH that is separable in mean and variance model. For the specification search for the mean model, we don't really need to include all parameters from the GARCH model. If we want to check the overall fit, then we need the full model. My similar examples these days are count models and overdispersion)
"2021-09-06T17:24:20Z"
0.12
[ "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol", 
"statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only_params", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_aic", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_scale", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_fittedvalues", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_params", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_bic", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_scale", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_resid", 
"statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_params", "statsmodels/genmod/tests/test_glm.py::test_attribute_writable_resettable", "statsmodels/genmod/tests/test_glm.py::test_perfect_pred", "statsmodels/genmod/tests/test_glm.py::test_score_test_ols", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls", "statsmodels/genmod/tests/test_glm.py::test_loglike_no_opt", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls_eim", "statsmodels/genmod/tests/test_glm.py::test_formula_missing_exposure", "statsmodels/genmod/tests/test_glm.py::test_glm_start_params", "statsmodels/genmod/tests/test_glm.py::test_summary", "statsmodels/genmod/tests/test_glm.py::test_plots", "statsmodels/genmod/tests/test_glm.py::test_glm_irls_method", "statsmodels/genmod/tests/test_glm.py::test_wtd_patsy_missing", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_poisson_limit", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_upper_limit", "statsmodels/genmod/tests/test_glm.py::testTweediePowerEstimate", "statsmodels/genmod/tests/test_glm.py::test_glm_lasso_6431", "statsmodels/genmod/tests/test_glm.py::test_tweedie_elastic_net", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int8]", 
"statsmodels/genmod/tests/test_glm.py::test_glm_bic", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int64]", "statsmodels/genmod/tests/test_glm.py::test_poisson_deviance", "statsmodels/genmod/tests/test_glm.py::test_non_invertible_hessian_fails_summary", "statsmodels/genmod/tests/test_glm.py::test_int_scale", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int32]", "statsmodels/genmod/tests/test_glm.py::test_tweedie_score", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int16]", "statsmodels/genmod/tests/test_glm.py::test_glm_bic_warning", "statsmodels/genmod/tests/test_glm.py::test_output_exposure_null", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_offset_exposure", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_R", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_missing", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_predict", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_bic", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_fittedvalues", 
"statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_df", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution_binom_count", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog_formula", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_endog_dtype", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_compare_OLS", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestRegularized::test_regularized", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary", 
"statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_r", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_scale", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_resid", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_pearson_chi2", 
"statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_mu", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_params", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_params", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_tpvalues", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_missing", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_mu", 
"statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_resid", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_df_num", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_params", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_rsquared", 
"statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_params", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_wresid", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized_weights", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_empty_model", 
"statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized_weights_list", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC2_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestOLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resid_zero_variance", "statsmodels/regression/tests/test_regression.py::TestOLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared_adj_overfit", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC1_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestOLS::test_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestOLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestOLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC3_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestOLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_missing", "statsmodels/regression/tests/test_regression.py::TestOLS::test_confidenceintervals", 
"statsmodels/regression/tests/test_regression.py::TestOLS::test_HC0_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_alternatives", "statsmodels/regression/tests/test_regression.py::TestOLS::test_eigenvalues", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_normalized_cov_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestOLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestYuleWalker::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wrong_size_sigma_1d", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_resids", 
"statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wrong_size_sigma_2d", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestRTO::test_wresid", "statsmodels/regression/tests/test_regression.py::TestRTO::test_resids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestRTO::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestRTO::test_bic", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestRTO::test_loglike", "statsmodels/regression/tests/test_regression.py::TestRTO::test_ess", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestRTO::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestRTO::test_scale", "statsmodels/regression/tests/test_regression.py::TestRTO::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestRTO::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestRTO::test_standarderrors", 
"statsmodels/regression/tests/test_regression.py::TestRTO::test_params", "statsmodels/regression/tests/test_regression.py::TestRTO::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestRTO::test_degrees", "statsmodels/regression/tests/test_regression.py::TestRTO::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_params", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_norm_resids", 
"statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_scale", "statsmodels/regression/tests/test_regression.py::test_wls_example", "statsmodels/regression/tests/test_regression.py::test_wls_tss", "statsmodels/regression/tests/test_regression.py::test_wls_missing", "statsmodels/regression/tests/test_regression.py::test_conf_int_single_regressor", "statsmodels/regression/tests/test_regression.py::test_fvalue_const_only", "statsmodels/regression/tests/test_regression.py::test_const_indicator", "statsmodels/regression/tests/test_regression.py::test_bad_size", "statsmodels/regression/tests/test_regression.py::test_summary_as_latex", "statsmodels/regression/tests/test_regression.py::test_fvalue_implicit_constant", "statsmodels/regression/tests/test_regression.py::test_regularized_refit", "statsmodels/regression/tests/test_regression.py::test_bool_regressor", "statsmodels/regression/tests/test_regression.py::test_regularized_options", "statsmodels/regression/tests/test_regression.py::test_burg_errors", "statsmodels/regression/tests/test_regression.py::test_ols_constant", "statsmodels/regression/tests/test_regression.py::test_burg", "statsmodels/regression/tests/test_regression.py::test_formula_missing_cat", "statsmodels/regression/tests/test_regression.py::test_missing_formula_predict", "statsmodels/regression/tests/test_regression.py::test_ridge", "statsmodels/regression/tests/test_regression.py::test_fvalue_only_constant", "statsmodels/regression/tests/test_regression.py::test_regularized_predict", "statsmodels/regression/tests/test_regression.py::test_summary_no_constant", "statsmodels/regression/tests/test_regression.py::TestFtest::test_Df_denom", "statsmodels/regression/tests/test_regression.py::TestFtest::test_p", "statsmodels/regression/tests/test_regression.py::TestFtest::test_F", "statsmodels/regression/tests/test_regression.py::TestFtest::test_Df_num", 
"statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_params", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_params_none", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_resid", 
"statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_scale", "statsmodels/regression/tests/test_regression.py::TestTtest::test_effect", "statsmodels/regression/tests/test_regression.py::TestTtest::test_tvalue", "statsmodels/regression/tests/test_regression.py::TestTtest::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestTtest::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestTtest::test_sd", "statsmodels/regression/tests/test_regression.py::TestTtest::test_new_tvalue", "statsmodels/regression/tests/test_regression.py::TestNonFit::test_df_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_ss", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_ll", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_df_num", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_new_ftest", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_loglike", 
"statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_params", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLS_CornerCases::test_wrong_size_weights", "statsmodels/regression/tests/test_regression.py::TestGLS::test_tvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS::test_resid", "statsmodels/regression/tests/test_regression.py::TestGLS::test_missing", "statsmodels/regression/tests/test_regression.py::TestGLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS::test_fittedvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_loglike", 
"statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_loglike", 
"statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_bic", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_wresid", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_ess", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_degrees", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_params", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_scale", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_resids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_nodemean", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_homoskedastic", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_nonnested", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_demean", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_LRversion", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_total", 
"statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_params", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_ess", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_ll", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_ss", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_params", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_confidenceintervals", 
"statsmodels/regression/tests/test_regression.py::TestNxNx::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_ess", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_params", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_params", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_fvalue", 
"statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_ess", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_loglike", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_tvalue", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_effect", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_sd" ]
[ "statsmodels/genmod/tests/test_glm.py::test_qaic", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_aic", "statsmodels/regression/tests/test_regression.py::TestOLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_aic", "statsmodels/regression/tests/test_regression.py::TestRTO::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_aic" ]
Python
[]
[]
statsmodels/statsmodels
7,696
statsmodels__statsmodels-7696
[ "6880" ]
c98477a45cbeffc12e192184e5d47a2cb74536dc
diff --git a/statsmodels/regression/linear_model.py b/statsmodels/regression/linear_model.py --- a/statsmodels/regression/linear_model.py +++ b/statsmodels/regression/linear_model.py @@ -2618,7 +2618,7 @@ def get_prediction(self, exog=None, transform=True, weights=None, self, exog=exog, transform=transform, weights=weights, row_labels=row_labels, **kwargs) - def summary(self, yname=None, xname=None, title=None, alpha=.05): + def summary(self, yname=None, xname=None, title=None, alpha=.05, slim=False): """ Summarize the Regression Results. @@ -2650,7 +2650,7 @@ def summary(self, yname=None, xname=None, title=None, alpha=.05): durbin_watson, jarque_bera, omni_normtest, - ) + ) jb, jbpv, skew, kurtosis = jarque_bera(self.wresid) omni, omnipv = omni_normtest(self.wresid) @@ -2696,19 +2696,28 @@ def summary(self, yname=None, xname=None, title=None, alpha=.05): ('BIC:', ["%#8.4g" % self.bic]) ] - diagn_left = [('Omnibus:', ["%#6.3f" % omni]), - ('Prob(Omnibus):', ["%#6.3f" % omnipv]), - ('Skew:', ["%#6.3f" % skew]), - ('Kurtosis:', ["%#6.3f" % kurtosis]) - ] - - diagn_right = [('Durbin-Watson:', - ["%#8.3f" % durbin_watson(self.wresid)] - ), - ('Jarque-Bera (JB):', ["%#8.3f" % jb]), - ('Prob(JB):', ["%#8.3g" % jbpv]), - ('Cond. No.', ["%#8.3g" % condno]) - ] + if slim: + slimlist = ['Dep. Variable:', 'Model:', 'No. Observations:', + 'Covariance Type:', 'R-squared:', 'Adj. R-squared:', + 'F-statistic:', 'Prob (F-statistic):'] + diagn_left = [] + diagn_right = [] + top_left = [elem for elem in top_left if elem[0] in slimlist] + top_right = [elem for elem in top_right if elem[0] in slimlist] + else: + diagn_left = [('Omnibus:', ["%#6.3f" % omni]), + ('Prob(Omnibus):', ["%#6.3f" % omnipv]), + ('Skew:', ["%#6.3f" % skew]), + ('Kurtosis:', ["%#6.3f" % kurtosis]) + ] + + diagn_right = [('Durbin-Watson:', + ["%#8.3f" % durbin_watson(self.wresid)] + ), + ('Jarque-Bera (JB):', ["%#8.3f" % jb]), + ('Prob(JB):', ["%#8.3g" % jbpv]), + ('Cond. 
No.', ["%#8.3g" % condno]) + ] if title is None: title = self.model.__class__.__name__ + ' ' + "Regression Results" @@ -2720,10 +2729,10 @@ def summary(self, yname=None, xname=None, title=None, alpha=.05): yname=yname, xname=xname, title=title) smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha, use_t=self.use_t) - - smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right, - yname=yname, xname=xname, - title="") + if not slim: + smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right, + yname=yname, xname=xname, + title="") # add warnings/notes, added to text format only etext = []
diff --git a/statsmodels/regression/tests/test_regression.py b/statsmodels/regression/tests/test_regression.py --- a/statsmodels/regression/tests/test_regression.py +++ b/statsmodels/regression/tests/test_regression.py @@ -331,6 +331,12 @@ def test_norm_resid(self): model_norm_resid = self.res1.resid_pearson assert_almost_equal(model_norm_resid, norm_resid, DECIMAL_7) + def test_summary_slim(self): + # check that slim summary is smaller, does not verify content + summ = self.res1.summary(slim=True) + assert len(summ.tables) == 2 + assert len(str(summ)) < 6700 + def test_norm_resid_zero_variance(self): with warnings.catch_warnings(record=True): y = self.res1.model.endog
slim ols results Add slim parameter to summary function. slim=True generates a minimal result table, default or slim=False generates the original output. - [ ] closes #xxxx - [ ] tests added / passed. - [x] code/documentation is well formatted. - [ ] properly formatted commit message. See [NumPy's guide](https://docs.scipy.org/doc/numpy-1.15.1/dev/gitwash/development_workflow.html#writing-the-commit-message). <details> **Notes**: * It is essential that you add a test when making code changes. Tests are not needed for doc changes. * When adding a new function, test values should usually be verified in another package (e.g., R/SAS/Stata). * When fixing a bug, you must add a test that would produce the bug in master and then show that it is fixed with the new code. * New code additions must be well formatted. Changes should pass flake8. If on Linux or OSX, you can verify you changes are well formatted by running ``` git diff upstream/master -u -- "*.py" | flake8 --diff --isolated ``` assuming `flake8` is installed. This command is also available on Windows using the Windows System for Linux once `flake8` is installed in the local Linux environment. While passing this test is not required, it is good practice and it help improve code quality in `statsmodels`. * Docstring additions must render correctly, including escapes and LaTeX. </details>
[![Coverage Status](https://coveralls.io/builds/32080879/badge)](https://coveralls.io/builds/32080879) Coverage increased (+0.003%) to 88.029% when pulling **69460e0c4dd386b15f9f361326866e5376c2c77a on janosbiro:slim-results** into **5c6e3f1a3b417b58c932c52ef885fa29c7d00b15 on statsmodels:master**. This pull request **introduces 2 alerts** when merging 7e0ec3c9e01c11d89da8f11857621c846fd294b7 into 79c51ffbc63d62b2c9c7ca4d3f45d89b9c745fd8 - [view on LGTM.com](https://lgtm.com/projects/g/statsmodels/statsmodels/rev/pr-236f81b66a3e49f904b3a15132442fb15c312cef) **new alerts:** * 2 for Unused local variable looks like too much code duplication to me. The overlapping parts should be only once in the code. What's removed when "slim"? AIC, BIC, diagnostic table, others ? hey, the slim results table is the following: [image: image.png] We are economists who are recoding their codes from stata and R. In the slim results we keep the most necessary results in table. Slim parameter removes diagnostics (bottom panel, method, date,time, aic,bic, etc...). br, J Josef Perktold <[email protected]> ezt Γ­rta (idΕ‘pont: 2020. jΓΊl. 10., P, 22:30): > looks like too much code duplication to me. > The overlapping parts should be only once in the code. > > What's removed when "slim"? AIC, BIC, diagnostic table, others ? > > β€” > You are receiving this because you authored the thread. > Reply to this email directly, view it on GitHub > <https://github.com/statsmodels/statsmodels/pull/6880#issuecomment-656876122>, > or unsubscribe > <https://github.com/notifications/unsubscribe-auth/AHYWCDU5QXQ63JG4DAEKMBLR2525VANCNFSM4OW3OJAQ> > . > -- BirΓ³ JΓ‘nos The image doesn't show up. It's easier if you just copy-paste the text version from `print(res.summary(slim=True))` ``` OLS Regression Results ============================================================================== Dep. Variable: lnprice R-squared: 0.244 Model: OLS Adj. R-squared: 0.234 No. 
Observations: 77 F-statistic: 24.20 Covariance Type: nonrobust Prob (F-statistic): 4.99e-06 ============================================================================== coef std err t P>|t| [0.025 0.975] ------------------------------------------------------------------------------ Intercept 3.1375 0.285 11.025 0.000 2.571 3.704 rating 0.3540 0.072 4.920 0.000 0.211 0.497 ============================================================================== ``` The implementation has too much copy-pasta for my tastes. I think this isn't a good way forward since it is still incredibly specific. A better way would be to have a function or class that would be configured to produce tables using standard and common property names. Hello @janosbiro! Thanks for updating this PR. We checked the lines you've touched for [PEPΒ 8](https://www.python.org/dev/peps/pep-0008) issues, and found: * In the file [`statsmodels/regression/linear_model.py`](https://github.com/statsmodels/statsmodels/blob/69460e0c4dd386b15f9f361326866e5376c2c77a/statsmodels/regression/linear_model.py): > [Line 2668:1](https://github.com/statsmodels/statsmodels/blob/69460e0c4dd386b15f9f361326866e5376c2c77a/statsmodels/regression/linear_model.py#L2668): [W293](https://duckduckgo.com/?q=pep8%20W293) blank line contains whitespace This pull request **introduces 2 alerts** when merging 69460e0c4dd386b15f9f361326866e5376c2c77a into 5c6e3f1a3b417b58c932c52ef885fa29c7d00b15 - [view on LGTM.com](https://lgtm.com/projects/g/statsmodels/statsmodels/rev/pr-a5ad10767e8d43a77feb429b3ceb2ddc3d836326) **new alerts:** * 2 for Unused local variable @bashtage Can you check it now please? :) I've rewrote without code duplication @josef-pkt can you check the last update please? We would like to use this python-package to teach econometrics for economists. But the original summary table is confusing for student's and most of the table elements not used, it's better to use a slim table. 
e.g.: Stata or R regression outputs is lightweight but they present the most important diagnostics and you can make the extras looks good to me, but I want to check it out because we don't have proper unit tests for summary A smoke test for the slim option should be added to the unit test, or one that assert the partial content. AFAIR we have just one or a few of those.
"2021-09-07T00:01:56Z"
0.12
[ "statsmodels/regression/tests/test_regression.py::TestOLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestOLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC3_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared_adj_overfit", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestOLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_normalized_cov_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC1_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_alternatives", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC0_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC2_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestOLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestOLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_missing", "statsmodels/regression/tests/test_regression.py::TestOLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_conf_int_subset", 
"statsmodels/regression/tests/test_regression.py::TestOLS::test_eigenvalues", "statsmodels/regression/tests/test_regression.py::TestOLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resid_zero_variance", "statsmodels/regression/tests/test_regression.py::TestOLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestOLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_resid", 
"statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_params_none", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_params", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_ll", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_params", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_ss", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_params", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_wresid", 
"statsmodels/regression/tests/test_regression.py::TestNxNx::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_ess", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wrong_size_sigma_2d", 
"statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wrong_size_sigma_1d", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_loglike", 
"statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_params", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_sumof_squaredresids", 
"statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_ess", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_params", "statsmodels/regression/tests/test_regression.py::TestNonFit::test_df_resid", 
"statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_params", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLS_CornerCases::test_wrong_size_weights", 
"statsmodels/regression/tests/test_regression.py::TestTtest::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestTtest::test_effect", "statsmodels/regression/tests/test_regression.py::TestTtest::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestTtest::test_sd", "statsmodels/regression/tests/test_regression.py::TestTtest::test_new_tvalue", "statsmodels/regression/tests/test_regression.py::TestTtest::test_tvalue", "statsmodels/regression/tests/test_regression.py::TestGLS::test_resid", "statsmodels/regression/tests/test_regression.py::TestGLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS::test_fittedvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_tvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS::test_missing", "statsmodels/regression/tests/test_regression.py::TestFtest::test_p", "statsmodels/regression/tests/test_regression.py::TestFtest::test_Df_num", "statsmodels/regression/tests/test_regression.py::TestFtest::test_F", "statsmodels/regression/tests/test_regression.py::TestFtest::test_Df_denom", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_rsquared_adj", 
"statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_params", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_df_num", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_new_ftest", "statsmodels/regression/tests/test_regression.py::test_wls_example", "statsmodels/regression/tests/test_regression.py::test_wls_tss", "statsmodels/regression/tests/test_regression.py::test_wls_missing", 
"statsmodels/regression/tests/test_regression.py::test_const_indicator", "statsmodels/regression/tests/test_regression.py::test_bad_size", "statsmodels/regression/tests/test_regression.py::test_summary_as_latex", "statsmodels/regression/tests/test_regression.py::test_conf_int_single_regressor", "statsmodels/regression/tests/test_regression.py::test_fvalue_const_only", "statsmodels/regression/tests/test_regression.py::test_fvalue_only_constant", "statsmodels/regression/tests/test_regression.py::test_regularized_predict", "statsmodels/regression/tests/test_regression.py::test_regularized_options", "statsmodels/regression/tests/test_regression.py::test_missing_formula_predict", "statsmodels/regression/tests/test_regression.py::test_ridge", "statsmodels/regression/tests/test_regression.py::test_summary_no_constant", "statsmodels/regression/tests/test_regression.py::test_regularized_refit", "statsmodels/regression/tests/test_regression.py::test_bool_regressor", "statsmodels/regression/tests/test_regression.py::test_fvalue_implicit_constant", "statsmodels/regression/tests/test_regression.py::test_ols_constant", "statsmodels/regression/tests/test_regression.py::test_formula_missing_cat", "statsmodels/regression/tests/test_regression.py::test_burg_errors", "statsmodels/regression/tests/test_regression.py::test_burg", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_params", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_bic", 
"statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_loglike", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_resids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_degrees", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_aic", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_params", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_bic", 
"statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_scale", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_ess", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_wresid", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_sd", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_effect", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_tvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_ss", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_ll", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_params", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_homoskedastic", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_demean", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_nodemean", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_nonnested", 
"statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_LRversion", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_df_num", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_params", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_ess", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_scale", 
"statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestYuleWalker::test_params", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized_weights", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_empty_model", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized_weights_list", "statsmodels/regression/tests/test_regression.py::TestRTO::test_wresid", "statsmodels/regression/tests/test_regression.py::TestRTO::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestRTO::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestRTO::test_bic", "statsmodels/regression/tests/test_regression.py::TestRTO::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestRTO::test_aic", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestRTO::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestRTO::test_loglike", "statsmodels/regression/tests/test_regression.py::TestRTO::test_resids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_params", "statsmodels/regression/tests/test_regression.py::TestRTO::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestRTO::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestRTO::test_ess", "statsmodels/regression/tests/test_regression.py::TestRTO::test_scale", "statsmodels/regression/tests/test_regression.py::TestRTO::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_resid", 
"statsmodels/regression/tests/test_regression.py::TestRTO::test_degrees", "statsmodels/regression/tests/test_regression.py::TestRTO::test_norm_resids" ]
[ "statsmodels/regression/tests/test_regression.py::TestOLS::test_summary_slim" ]
Python
[]
[]
statsmodels/statsmodels
7,697
statsmodels__statsmodels-7697
[ "7663" ]
01367e91d6597bc526cb39bc62f671a77c8fc1af
diff --git a/statsmodels/nonparametric/kde.py b/statsmodels/nonparametric/kde.py --- a/statsmodels/nonparametric/kde.py +++ b/statsmodels/nonparametric/kde.py @@ -33,6 +33,7 @@ triw=kernels.Triweight, cos=kernels.Cosine, cos2=kernels.Cosine2, + tric=kernels.Tricube ) diff --git a/statsmodels/sandbox/nonparametric/kernels.py b/statsmodels/sandbox/nonparametric/kernels.py --- a/statsmodels/sandbox/nonparametric/kernels.py +++ b/statsmodels/sandbox/nonparametric/kernels.py @@ -563,3 +563,16 @@ def __init__(self, h=1.0): self._L2Norm = 1.5 self._kernel_var = 0.03267274151216444 # = 1/12. - 0.5 / np.pi**2 self._order = 2 + +class Tricube(CustomKernel): + """ + Tricube Kernel + + K(u) = 0.864197530864 * (1 - abs(x)**3)**3 between -1.0 and 1.0 + """ + def __init__(self,h=1.0): + CustomKernel.__init__(self,shape=lambda x: 0.864197530864 * (1 - abs(x)**3)**3, + h=h, domain=[-1.0, 1.0], norm = 1.0) + self._L2Norm = 175.0/247.0 + self._kernel_var = 35.0/243.0 + self._order = 2
diff --git a/statsmodels/nonparametric/tests/test_kernels.py b/statsmodels/nonparametric/tests/test_kernels.py --- a/statsmodels/nonparametric/tests/test_kernels.py +++ b/statsmodels/nonparametric/tests/test_kernels.py @@ -149,3 +149,19 @@ class TestBiweight(CheckKernelMixin): kern = kernels.Biweight() se_n_diff = 9 low_rtol = 0.3 + + +def test_tricube(): + # > library(kedd) + # > xx = c(-1., -0.75, -0.5, -0.25, 0., 0.25, 0.5, 0.75, 1.) + # > res = kernel.fun(x = xx, kernel="tricube",deriv.order=0) + # > res$kx + + res_kx = [ + 0.0000000000000000, 0.1669853116259163, 0.5789448302469136, + 0.8243179321289062, 0.8641975308641975, 0.8243179321289062, + 0.5789448302469136, 0.1669853116259163, 0.0000000000000000 + ] + xx = np.linspace(-1, 1, 9) + kx = kernels.Tricube()(xx) + assert_allclose(kx, res_kx, rtol=1e-10)
Adding more kernels in KDE Statsmodels consists of various different kernels but few additions such as exponential kernel and quartic kernel could be a good addition to the group of kernels. The additions would be similar as other kernels, classes would be created in <code>statsmodels/sandbox/nonparametric/kernels.py</code>, which would be then called in the list of kernels in <code>statsmodel/nonparametric/kde.py</code>.
Do you have any code for those? I added asymmetric kernels, beta kernel (kernels for unit interval) and kernels for positive valued random variables, but they don't use an interface that is compatible with the older kernel functions in nonparametric, at least not with those used for MultivariateKDE. Asymmetric kernels don't have the property that they depend on distance between points, which is assumed by KDEUnivariate. Is exponential kernel an asymmetric kernel? I think you are correct here, exponential kernel is an asymmetric kernel and could be added in <code>kernels_asymmetric.py</code>. Looking into the implementation of asymmetric kernels, wouldn't it be better to have a single class of these kernels instead of separate functions? Re code, should I make a PR for quartic kernel? > Looking into the implementation of asymmetric kernels, wouldn't it be better to have a single class of these kernels instead of separate functions? Not until we have a better idea of how they fit in and will be reused. I wrote them mainly to work out the math and see how well they do. My current use of them is to support and compare with parametric multivariate distributions in the context of copulas, e.g. nonparametric Beta and Bernstein copulas/distributions. `MultivariateKDE` could use them and uses separate functions for cdf and pdf. But I didn't like the signature of those function and used a different argument list. Tying them into MultivariateKDE would automatically (?) give us optimal bandwidth selection. Exponential kernel might be a special case of an existing asymmetric kernel. But I don't remember those details. > Re code, should I make a PR for quartic kernel? Yes, that sounds useful. But I haven't looked at that part in a long time. aside: I also wrote asymmetric kernel `rvs` functions for generating random samples, but the only one for which I found references is Beta kernel. I use that one in copulas. 
I didn't add the rvs functions to statsmodels yet because I don't have references, and I wrote them "by analogy".
"2021-09-07T15:19:21Z"
0.12
[ "statsmodels/nonparametric/tests/test_kernels.py::TestEpan::test_smoothconf_data", "statsmodels/nonparametric/tests/test_kernels.py::TestEpan::test_smoothconf", "statsmodels/nonparametric/tests/test_kernels.py::TestUniform::test_smoothconf_data", "statsmodels/nonparametric/tests/test_kernels.py::TestUniform::test_smoothconf", "statsmodels/nonparametric/tests/test_kernels.py::TestBiweight::test_smoothconf", "statsmodels/nonparametric/tests/test_kernels.py::TestBiweight::test_smoothconf_data", "statsmodels/nonparametric/tests/test_kernels.py::TestGau::test_smoothconf", "statsmodels/nonparametric/tests/test_kernels.py::TestGau::test_smoothconf_data", "statsmodels/nonparametric/tests/test_kernels.py::TestTriangular::test_smoothconf_data", "statsmodels/nonparametric/tests/test_kernels.py::TestTriangular::test_smoothconf", "statsmodels/nonparametric/tests/test_kernels.py::TestCosine::test_smoothconf_data" ]
[ "statsmodels/nonparametric/tests/test_kernels.py::test_tricube" ]
Python
[]
[]
statsmodels/statsmodels
7,704
statsmodels__statsmodels-7704
[ "6780" ]
5f8d374f5123f58e2475bcb6ce215b7687eadceb
diff --git a/statsmodels/stats/outliers_influence.py b/statsmodels/stats/outliers_influence.py --- a/statsmodels/stats/outliers_influence.py +++ b/statsmodels/stats/outliers_influence.py @@ -6,19 +6,19 @@ Author: Josef Perktold License: BSD-3 """ +from statsmodels.compat.pandas import Appender +from statsmodels.compat.python import lzip + from collections import defaultdict import numpy as np -from statsmodels.compat.python import lzip -from statsmodels.compat.pandas import Appender from statsmodels.graphics._regressionplots_doc import _plot_influence_doc from statsmodels.regression.linear_model import OLS from statsmodels.stats.multitest import multipletests from statsmodels.tools.decorators import cache_readonly from statsmodels.tools.tools import maybe_unwrap_results - # outliers test convenience wrapper def outlier_test(model_results, method='bonf', alpha=.05, labels=None, @@ -148,7 +148,8 @@ def reset_ramsey(res, degree=5): def variance_inflation_factor(exog, exog_idx): - """variance inflation factor, VIF, for one exogenous variable + """ + Variance inflation factor, VIF, for one exogenous variable The variance inflation factor is a measure for the increase of the variance of the parameter estimates if an additional variable, given by @@ -162,7 +163,7 @@ def variance_inflation_factor(exog, exog_idx): Parameters ---------- - exog : ndarray + exog : {ndarray, DataFrame} design matrix with all explanatory variables, as for example used in regression exog_idx : int @@ -170,7 +171,7 @@ def variance_inflation_factor(exog, exog_idx): Returns ------- - vif : float + float variance inflation factor Notes @@ -186,6 +187,7 @@ def variance_inflation_factor(exog, exog_idx): https://en.wikipedia.org/wiki/Variance_inflation_factor """ k_vars = exog.shape[1] + exog = np.asarray(exog) x_i = exog[:, exog_idx] mask = np.arange(k_vars) != exog_idx x_noti = exog[:, mask] @@ -443,6 +445,7 @@ def cooks_distance(self): self.d_params.T).T).sum(1) cooks_d2 /= self.k_vars from scipy 
import stats + # alpha = 0.1 # print stats.f.isf(1-alpha, n_params, res.df_modelwc) # TODO use chi2 # use_f option @@ -713,6 +716,7 @@ def cooks_distance(self): cooks_d2 *= hii / (1 - hii) from scipy import stats + # alpha = 0.1 # print stats.f.isf(1-alpha, n_params, res.df_modelwc) pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid) @@ -1050,9 +1054,10 @@ def summary_table(self, float_fmt="%6.3f"): colnames, data = lzip(*table_raw) # unzip data = np.column_stack(data) self.table_data = data + from copy import deepcopy + from statsmodels.iolib.table import SimpleTable, default_html_fmt from statsmodels.iolib.tableformatting import fmt_base - from copy import deepcopy fmt = deepcopy(fmt_base) fmt_html = deepcopy(default_html_fmt) fmt['data_fmts'] = ["%4d"] + [float_fmt] * (data.shape[1] - 1) @@ -1081,6 +1086,7 @@ def summary_table(res, alpha=0.05): """ from scipy import stats + from statsmodels.sandbox.regression.predstd import wls_prediction_std infl = OLSInfluence(res) @@ -1127,9 +1133,10 @@ def summary_table(res, alpha=0.05): colnames = ss2 # self.table_data = data # data = np.column_stack(data) + from copy import deepcopy + from statsmodels.iolib.table import SimpleTable, default_html_fmt from statsmodels.iolib.tableformatting import fmt_base - from copy import deepcopy fmt = deepcopy(fmt_base) fmt_html = deepcopy(default_html_fmt) fmt['data_fmts'] = ["%4d"] + ["%6.3f"] * (data.shape[1] - 1) @@ -1252,6 +1259,7 @@ def cooks_distance(self): cooks_d2 *= hii / (1 - hii) from scipy import stats + # alpha = 0.1 # print stats.f.isf(1-alpha, n_params, res.df_modelwc) pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid)
diff --git a/statsmodels/stats/tests/test_outliers_influence.py b/statsmodels/stats/tests/test_outliers_influence.py --- a/statsmodels/stats/tests/test_outliers_influence.py +++ b/statsmodels/stats/tests/test_outliers_influence.py @@ -1,16 +1,28 @@ +import numpy as np from numpy.testing import assert_almost_equal from statsmodels.datasets import statecrime from statsmodels.regression.linear_model import OLS -from statsmodels.stats.outliers_influence import reset_ramsey +from statsmodels.stats.outliers_influence import ( + reset_ramsey, + variance_inflation_factor, +) from statsmodels.tools import add_constant -data = statecrime.load_pandas().data - def test_reset_stata(): - mod = OLS(data.violent, add_constant(data[['murder', 'hs_grad']])) + data = statecrime.load_pandas().data + mod = OLS(data.violent, add_constant(data[["murder", "hs_grad"]])) res = mod.fit() stat = reset_ramsey(res, degree=4) assert_almost_equal(stat.fvalue[0, 0], 1.52, decimal=2) assert_almost_equal(stat.pvalue, 0.2221, decimal=4) + + exog_idx = list(data.columns).index("urban") + data_arr = np.asarray(data) + vif = variance_inflation_factor(data_arr, exog_idx) + assert_almost_equal(vif, 16.4394, decimal=4) + + exog_idx = list(data.columns).index("urban") + vif_df = variance_inflation_factor(data, exog_idx) + assert_almost_equal(vif_df, 16.4394, decimal=4)
ENH: Add pandas dataframe capability to variance_inflation_factor Proposing a small change to the variance_inflation_factor() method in the outliers_influence package, in order to allow `exog` input to be a pandas DataFrame as well as a numpy array. In the test, the value computed for the VIF using my proposed code edit with a pandas dataframe input is 16.4394, which I compare to the value computed using the current state of the method, taking an array as input.
"2021-09-08T16:48:17Z"
0.12
[]
[ "statsmodels/stats/tests/test_outliers_influence.py::test_reset_stata" ]
Python
[]
[]
statsmodels/statsmodels
7,709
statsmodels__statsmodels-7709
[ "7446" ]
d503f0b5811b807ee79bd9b0fcf1fd0e336fadc1
diff --git a/statsmodels/tsa/arima_process.py b/statsmodels/tsa/arima_process.py --- a/statsmodels/tsa/arima_process.py +++ b/statsmodels/tsa/arima_process.py @@ -16,16 +16,25 @@ Author: josefpktd License: BSD """ +from statsmodels.compat.pandas import Appender + import numpy as np -from scipy import signal, optimize, linalg +from scipy import linalg, optimize, signal -from statsmodels.compat.pandas import Appender -from statsmodels.tools.docstring import remove_parameters, Docstring +from statsmodels.tools.docstring import Docstring, remove_parameters from statsmodels.tools.validation import array_like -__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample', - 'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve', - 'lpol2index', 'index2lpol'] +__all__ = [ + "arma_acf", + "arma_acovf", + "arma_generate_sample", + "arma_impulse_response", + "arma2ar", + "arma2ma", + "deconvolve", + "lpol2index", + "index2lpol", +] NONSTATIONARY_ERROR = """\ @@ -34,8 +43,9 @@ """ -def arma_generate_sample(ar, ma, nsample, scale=1, distrvs=None, - axis=0, burnin=0): +def arma_generate_sample( + ar, ma, nsample, scale=1, distrvs=None, axis=0, burnin=0 +): """ Simulate data from an ARMA. 
@@ -145,7 +155,7 @@ def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None): m = max(p, q) + 1 if sigma2.real < 0: - raise ValueError('Must have positive innovation variance.') + raise ValueError("Must have positive innovation variance.") # Short-circuit for trivial corner-case if p == q == 0: @@ -164,11 +174,11 @@ def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None): b = np.zeros((m, 1), dtype=dtype) # We need a zero-right-padded version of ar params tmp_ar = np.zeros(m, dtype=dtype) - tmp_ar[:p + 1] = ar + tmp_ar[: p + 1] = ar for k in range(m): - A[k, :(k + 1)] = tmp_ar[:(k + 1)][::-1] - A[k, 1:m - k] += tmp_ar[(k + 1):m] - b[k] = sigma2 * np.dot(ma[k:q + 1], ma_coeffs[:max((q + 1 - k), 0)]) + A[k, : (k + 1)] = tmp_ar[: (k + 1)][::-1] + A[k, 1 : m - k] += tmp_ar[(k + 1) : m] + b[k] = sigma2 * np.dot(ma[k : q + 1], ma_coeffs[: max((q + 1 - k), 0)]) acovf = np.zeros(max(nobs, m), dtype=dtype) try: acovf[:m] = np.linalg.solve(A, b)[:, 0] @@ -178,8 +188,9 @@ def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None): # Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances if nobs > m: zi = signal.lfiltic([1], ar, acovf[:m:][::-1]) - acovf[m:] = signal.lfilter([1], ar, np.zeros(nobs - m, dtype=dtype), - zi=zi)[0] + acovf[m:] = signal.lfilter( + [1], ar, np.zeros(nobs - m, dtype=dtype), zi=zi + )[0] return acovf[:nobs] @@ -240,7 +251,7 @@ def arma_pacf(ar, ma, lags=10): apacf = np.zeros(lags) acov = arma_acf(ar, ma, lags=lags + 1) - apacf[0] = 1. 
+ apacf[0] = 1.0 for k in range(2, lags + 1): r = acov[:k] apacf[k - 1] = linalg.solve(linalg.toeplitz(r[:-1]), r[1:])[-1] @@ -286,8 +297,11 @@ def arma_periodogram(ar, ma, worN=None, whole=0): if np.any(np.isnan(h)): # this happens with unit root or seasonal unit root' import warnings - warnings.warn('Warning: nan in frequency response h, maybe a unit ' - 'root', RuntimeWarning) + + warnings.warn( + "Warning: nan in frequency response h, maybe a unit " "root", + RuntimeWarning, + ) return w, sd @@ -346,7 +360,7 @@ def arma_impulse_response(ar, ma, leads=100): 0.63488 , 0.507904 , 0.4063232 , 0.32505856, 0.26004685]) """ impulse = np.zeros(leads) - impulse[0] = 1. + impulse[0] = 1.0 return signal.lfilter(ma, ar, impulse) @@ -401,7 +415,7 @@ def arma2ar(ar, ma, lags=100): # moved from sandbox.tsa.try_fi -def ar2arma(ar_des, p, q, n=20, mse='ar', start=None): +def ar2arma(ar_des, p, q, n=20, mse="ar", start=None): """ Find arma approximation to ar process. @@ -448,9 +462,9 @@ def ar2arma(ar_des, p, q, n=20, mse='ar', start=None): # p,q = pq def msear_err(arma, ar_des): - ar, ma = np.r_[1, arma[:p - 1]], np.r_[1, arma[p - 1:]] + ar, ma = np.r_[1, arma[: p - 1]], np.r_[1, arma[p - 1 :]] ar_approx = arma_impulse_response(ma, ar, n) - return (ar_des - ar_approx) # ((ar - ar_approx)**2).sum() + return ar_des - ar_approx # ((ar - ar_approx)**2).sum() if start is None: arma0 = np.r_[-0.9 * np.ones(p - 1), np.zeros(q - 1)] @@ -458,13 +472,12 @@ def msear_err(arma, ar_des): arma0 = start res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000) arma_app = np.atleast_1d(res[0]) - ar_app = np.r_[1, arma_app[:p - 1]], - ma_app = np.r_[1, arma_app[p - 1:]] + ar_app = (np.r_[1, arma_app[: p - 1]],) + ma_app = np.r_[1, arma_app[p - 1 :]] return ar_app, ma_app, res -_arma_docs = {'ar': arma2ar.__doc__, - 'ma': arma2ma.__doc__} +_arma_docs = {"ar": arma2ar.__doc__, "ma": arma2ma.__doc__} def lpol2index(ar): @@ -483,7 +496,7 @@ def lpol2index(ar): index : ndarray index (lags) of 
lag polynomial with non-zero elements """ - ar = array_like(ar, 'ar') + ar = array_like(ar, "ar") index = np.nonzero(ar)[0] coeffs = ar[index] return coeffs, index @@ -530,6 +543,7 @@ def lpol_fima(d, n=20): """ # hide import inside function until we use this heavily from scipy.special import gammaln + j = np.arange(n) return np.exp(gammaln(d + j) - gammaln(j + 1) - gammaln(d)) @@ -558,8 +572,9 @@ def lpol_fiar(d, n=20): """ # hide import inside function until we use this heavily from scipy.special import gammaln + j = np.arange(n) - ar = - np.exp(gammaln(-d + j) - gammaln(j + 1) - gammaln(-d)) + ar = -np.exp(gammaln(-d + j) - gammaln(j + 1) - gammaln(-d)) ar[0] = 1 return ar @@ -625,7 +640,7 @@ def deconvolve(num, den, n=None): input = np.zeros(n, float) input[0] = 1 quot = signal.lfilter(num, den, input) - num_approx = signal.convolve(den, quot, mode='full') + num_approx = signal.convolve(den, quot, mode="full") if len(num) < len(num_approx): # 1d only ? num = np.concatenate((num, np.zeros(len(num_approx) - len(num)))) rem = num - num_approx @@ -633,9 +648,9 @@ def deconvolve(num, den, n=None): _generate_sample_doc = Docstring(arma_generate_sample.__doc__) -_generate_sample_doc.remove_parameters(['ar', 'ma']) -_generate_sample_doc.replace_block('Notes', []) -_generate_sample_doc.replace_block('Examples', []) +_generate_sample_doc.remove_parameters(["ar", "ma"]) +_generate_sample_doc.replace_block("Notes", []) +_generate_sample_doc.replace_block("Examples", []) class ArmaProcess(object): @@ -710,11 +725,11 @@ class ArmaProcess(object): # TODO: Check unit root behavior def __init__(self, ar=None, ma=None, nobs=100): if ar is None: - ar = np.array([1.]) + ar = np.array([1.0]) if ma is None: - ma = np.array([1.]) - self.ar = array_like(ar, 'ar') - self.ma = array_like(ma, 'ma') + ma = np.array([1.0]) + self.ar = array_like(ar, "ar") + self.ma = array_like(ma, "ma") self.arcoefs = -self.ar[1:] self.macoefs = self.ma[1:] self.arpoly = np.polynomial.Polynomial(self.ar) 
@@ -756,18 +771,20 @@ def from_coeffs(cls, arcoefs=None, macoefs=None, nobs=100): """ arcoefs = [] if arcoefs is None else arcoefs macoefs = [] if macoefs is None else macoefs - return cls(np.r_[1, -np.asarray(arcoefs)], - np.r_[1, np.asarray(macoefs)], - nobs=nobs) + return cls( + np.r_[1, -np.asarray(arcoefs)], + np.r_[1, np.asarray(macoefs)], + nobs=nobs, + ) @classmethod def from_estimation(cls, model_results, nobs=None): """ - Create an ArmaProcess from the results of an ARMA estimation. + Create an ArmaProcess from the results of an ARIMA estimation. Parameters ---------- - model_results : ARMAResults instance + model_results : ARIMAResults instance A fitted model. nobs : int, optional If None, nobs is taken from the results. @@ -776,11 +793,18 @@ def from_estimation(cls, model_results, nobs=None): ------- ArmaProcess Class instance initialized from model_results. + + See Also + -------- + statsmodels.tsa.arima.model.ARIMA + The models class used to create the ArmaProcess """ - arcoefs = model_results.arparams - macoefs = model_results.maparams nobs = nobs or model_results.nobs - return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs) + return cls( + model_results.polynomial_reduced_ar, + model_results.polynomial_reduced_ma, + nobs=nobs, + ) def __mul__(self, oth): if isinstance(oth, self.__class__): @@ -794,50 +818,55 @@ def __mul__(self, oth): ar = (self.arpoly * arpolyoth).coef ma = (self.mapoly * mapolyoth).coef except: - raise TypeError('Other type is not a valid type') + raise TypeError("Other type is not a valid type") return self.__class__(ar, ma, nobs=self.nobs) def __repr__(self): - msg = 'ArmaProcess({0}, {1}, nobs={2}) at {3}' - return msg.format(self.ar.tolist(), self.ma.tolist(), - self.nobs, hex(id(self))) + msg = "ArmaProcess({0}, {1}, nobs={2}) at {3}" + return msg.format( + self.ar.tolist(), self.ma.tolist(), self.nobs, hex(id(self)) + ) def __str__(self): - return 'ArmaProcess\nAR: {0}\nMA: {1}'.format(self.ar.tolist(), - 
self.ma.tolist()) + return "ArmaProcess\nAR: {0}\nMA: {1}".format( + self.ar.tolist(), self.ma.tolist() + ) - @Appender(remove_parameters(arma_acovf.__doc__, ['ar', 'ma', 'sigma2'])) + @Appender(remove_parameters(arma_acovf.__doc__, ["ar", "ma", "sigma2"])) def acovf(self, nobs=None): nobs = nobs or self.nobs return arma_acovf(self.ar, self.ma, nobs=nobs) - @Appender(remove_parameters(arma_acf.__doc__, ['ar', 'ma'])) + @Appender(remove_parameters(arma_acf.__doc__, ["ar", "ma"])) def acf(self, lags=None): lags = lags or self.nobs return arma_acf(self.ar, self.ma, lags=lags) - @Appender(remove_parameters(arma_pacf.__doc__, ['ar', 'ma'])) + @Appender(remove_parameters(arma_pacf.__doc__, ["ar", "ma"])) def pacf(self, lags=None): lags = lags or self.nobs return arma_pacf(self.ar, self.ma, lags=lags) - @Appender(remove_parameters(arma_periodogram.__doc__, ['ar', 'ma', 'worN', - 'whole'])) + @Appender( + remove_parameters( + arma_periodogram.__doc__, ["ar", "ma", "worN", "whole"] + ) + ) def periodogram(self, nobs=None): nobs = nobs or self.nobs return arma_periodogram(self.ar, self.ma, worN=nobs) - @Appender(remove_parameters(arma_impulse_response.__doc__, ['ar', 'ma'])) + @Appender(remove_parameters(arma_impulse_response.__doc__, ["ar", "ma"])) def impulse_response(self, leads=None): leads = leads or self.nobs return arma_impulse_response(self.ar, self.ma, leads=leads) - @Appender(remove_parameters(arma2ma.__doc__, ['ar', 'ma'])) + @Appender(remove_parameters(arma2ma.__doc__, ["ar", "ma"])) def arma2ma(self, lags=None): lags = lags or self.lags return arma2ma(self.ar, self.ma, lags=lags) - @Appender(remove_parameters(arma2ar.__doc__, ['ar', 'ma'])) + @Appender(remove_parameters(arma2ar.__doc__, ["ar", "ma"])) def arma2ar(self, lags=None): lags = lags or self.lags return arma2ar(self.ar, self.ma, lags=lags) @@ -908,7 +937,7 @@ def invertroots(self, retnew=False): mainv = self.ma invertible = self.isinvertible if not invertible: - pr[np.abs(pr) < 1] = 1. 
/ pr[np.abs(pr) < 1] + pr[np.abs(pr) < 1] = 1.0 / pr[np.abs(pr) < 1] pnew = np.polynomial.Polynomial.fromroots(pr) mainv = pnew.coef / pnew.coef[0] @@ -918,7 +947,9 @@ def invertroots(self, retnew=False): return mainv, invertible @Appender(str(_generate_sample_doc)) - def generate_sample(self, nsample=100, scale=1., distrvs=None, axis=0, - burnin=0): - return arma_generate_sample(self.ar, self.ma, nsample, scale, distrvs, - axis=axis, burnin=burnin) + def generate_sample( + self, nsample=100, scale=1.0, distrvs=None, axis=0, burnin=0 + ): + return arma_generate_sample( + self.ar, self.ma, nsample, scale, distrvs, axis=axis, burnin=burnin + )
diff --git a/statsmodels/tsa/tests/test_arima_process.py b/statsmodels/tsa/tests/test_arima_process.py --- a/statsmodels/tsa/tests/test_arima_process.py +++ b/statsmodels/tsa/tests/test_arima_process.py @@ -1,4 +1,4 @@ -from unittest import TestCase +import datetime as dt import numpy as np from numpy.testing import ( @@ -9,9 +9,11 @@ assert_equal, assert_raises, ) +import pandas as pd import pytest from statsmodels.sandbox.tsa.fftarma import ArmaFft +from statsmodels.tsa.arima.model import ARIMA from statsmodels.tsa.arima_process import ( ArmaProcess, arma_acf, @@ -251,7 +253,7 @@ def test_lpol2index_index2lpol(): assert_equal(process.arcoefs, ar) -class TestArmaProcess(TestCase): +class TestArmaProcess: def test_empty_coeff(self): process = ArmaProcess() assert_equal(process.arcoefs, np.array([])) @@ -411,3 +413,23 @@ def test_periodogram(self): pg = process.periodogram() assert_almost_equal(pg[0], np.linspace(0, np.pi, 100, False)) assert_almost_equal(pg[1], np.sqrt(2 / np.pi) / 2 * np.ones(100)) + + [email protected]("d", [0, 1]) [email protected]("seasonal", [True]) +def test_from_estimation(d, seasonal): + ar = [0.8] if not seasonal else [0.8, 0, 0, 0.2, -0.16] + ma = [0.4] if not seasonal else [0.4, 0, 0, 0.2, -0.08] + ap = ArmaProcess.from_coeffs(ar, ma, 500) + idx = pd.date_range(dt.datetime(1900, 1, 1), periods=500, freq="Q") + data = ap.generate_sample(500) + if d == 1: + data = np.cumsum(data) + data = pd.Series(data, index=idx) + seasonal_order = (1, 0, 1, 4) if seasonal else None + mod = ARIMA(data, order=(1, d, 1), seasonal_order=seasonal_order) + res = mod.fit() + ap_from = ArmaProcess.from_estimation(res) + shape = (5,) if seasonal else (1,) + assert ap_from.arcoefs.shape == shape + assert ap_from.macoefs.shape == shape
BUG: Remove/rewrite from_estimation from ArmaProcess #### Describe the bug An attempt to create a pure AR model results in attribute error #### Code Sample, a copy-pastable example if possible ```python import numpy as np import statsmodels from statsmodels.tsa.arima_process import ArmaProcess from statsmodels.tsa.arima.model import ARIMA some_data = np.random.randn(100) modeling_result = ARIMA(some_data, order=(4, 0, 0)).fit() process = ArmaProcess.from_estimation(modeling_result) ``` <details> Error backtrace: --------------------------------------------------------------------------- ``` AttributeError Traceback (most recent call last) <ipython-input-1-49dc4cff01dd> in <module> 6 some_data = np.random.randn(100) 7 modeling_result = ARIMA(some_data, order=(4, 0, 0)).fit() ----> 8 process = ArmaProcess.from_estimation(modeling_result) c:\python39\lib\site-packages\statsmodels\tsa\arima_process.py in from_estimation(cls, model_results, nobs) 779 """ 780 arcoefs = model_results.arparams --> 781 macoefs = model_results.maparams 782 nobs = nobs or model_results.nobs 783 return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs) c:\python39\lib\site-packages\statsmodels\base\wrapper.py in __getattribute__(self, attr) 32 pass 33 ---> 34 obj = getattr(results, attr) 35 data = results.model.data 36 how = self._wrap_attrs.get(attr) pandas\_libs\properties.pyx in pandas._libs.properties.CachedProperty.__get__() c:\python39\lib\site-packages\statsmodels\tsa\statespace\sarimax.py in maparams(self) 1946 zero. 1947 """ -> 1948 return self._params_ma 1949 1950 @cache_readonly AttributeError: 'ARIMAResults' object has no attribute '_params_ma' ``` ---------------------------------------------------------------------------- Just changing line 1948 in sarimax.py to ``` try: return self._params_ma except AttributeError: return [] ``` fixes the issue. </details> If the issue has not been resolved, please file it in the issue tracker. 
#### Expected Output Some ARMA process instance #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.9.2.final.0 statsmodels =========== Installed: 0.12.2 (c:\python39\lib\site-packages\statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.19.5 (c:\python39\lib\site-packages\numpy) scipy: 1.6.0 (c:\python39\lib\site-packages\scipy) pandas: 1.2.1 (c:\python39\lib\site-packages\pandas) dateutil: 2.8.1 (c:\python39\lib\site-packages\dateutil) patsy: 0.5.1 (c:\python39\lib\site-packages\patsy) Optional Dependencies ===================== matplotlib: 3.3.4 (c:\python39\lib\site-packages\matplotlib) backend: module://ipykernel.pylab.backend_inline cvxopt: Not installed joblib: 1.0.1 (c:\python39\lib\site-packages\joblib) Developer Tools ================ IPython: 7.19.0 (c:\python39\lib\site-packages\IPython) jinja2: 2.11.2 (c:\python39\lib\site-packages\jinja2) sphinx: Not installed pygments: 2.7.4 (c:\python39\lib\site-packages\pygments) pytest: Not installed virtualenv: Not installed </details>
You are using the wrong `ARIMA`. Unfortunately the right `ARIMA` is deprecated, and so the only workaround is to parse the model parameters yourself to ArmaProcess. Oh, I see. Thanks for reply then! Closing as answered.
"2021-09-09T08:40:32Z"
0.12
[ "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma0-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma2-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma3-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma1-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_impulse_response", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma1-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma3-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma2-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma0-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma0-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma2-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_lpol2index_index2lpol", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma1-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_acovf", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma0-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_fi", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma1-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma1-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_acf", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma0-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma0-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma3-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma2-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma3-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma1-ar1]", 
"statsmodels/tsa/tests/test_arima_process.py::test_arma_acovf_persistent", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma3-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma3-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma3-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma2-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma1-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma3-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma1-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_acf_compare_R_ARMAacf", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma1-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma0-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma3-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma0-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma2-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma0-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma1-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma2-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma2-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma1-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_acov_compare_theoretical_arma_acov", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma2-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma0-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma0-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma2-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma3-ar0]", 
"statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma0-ar1]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma2-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma1-ar3]", "statsmodels/tsa/tests/test_arima_process.py::test_armafft[ma3-ar0]", "statsmodels/tsa/tests/test_arima_process.py::test_spectrum[ma3-ar2]", "statsmodels/tsa/tests/test_arima_process.py::test_arma_generate_sample[standard_normal-ma2-ar1]", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_pacf", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_generate_sample", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_from_coeff", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_impulse_response", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_isstationary", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_str_repr", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_arma2ar", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_acf", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_invertroots", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_process_multiplication", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_empty_coeff", "statsmodels/tsa/tests/test_arima_process.py::TestArmaProcess::test_periodogram" ]
[ "statsmodels/tsa/tests/test_arima_process.py::test_from_estimation[True-1]", "statsmodels/tsa/tests/test_arima_process.py::test_from_estimation[True-0]" ]
Python
[]
[]
statsmodels/statsmodels
7,710
statsmodels__statsmodels-7710
[ "7321" ]
152ac7b2eb31404b46f5f0f5f080f9abc5ce89b6
diff --git a/statsmodels/stats/descriptivestats.py b/statsmodels/stats/descriptivestats.py --- a/statsmodels/stats/descriptivestats.py +++ b/statsmodels/stats/descriptivestats.py @@ -525,6 +525,9 @@ def _safe_jarque_bera(c): output = f"{{0:{fmt}}}%" perc.index = [output.format(val) for val in index] + # Add in the names of the percentiles to the output + self._stats = self._stats + perc.index.tolist() + return self._reorder(pd.concat([results_df, perc], axis=0)) @cache_readonly
diff --git a/statsmodels/stats/tests/test_descriptivestats.py b/statsmodels/stats/tests/test_descriptivestats.py --- a/statsmodels/stats/tests/test_descriptivestats.py +++ b/statsmodels/stats/tests/test_descriptivestats.py @@ -164,7 +164,15 @@ def test_description_basic(df): def test_odd_percentiles(df): percentiles = np.linspace(7.0, 93.0, 13) res = Description(df, percentiles=percentiles) - print(res.frame.index) + stats = [ + 'nobs', 'missing', 'mean', 'std_err', 'upper_ci', 'lower_ci', 'std', + 'iqr', 'iqr_normal', 'mad', 'mad_normal', 'coef_var', 'range', 'max', + 'min', 'skew', 'kurtosis', 'jarque_bera', 'jarque_bera_pval', 'mode', + 'mode_freq', 'median', 'distinct', 'top_1', 'top_2', 'top_3', 'top_4', + 'top_5', 'freq_1', 'freq_2', 'freq_3', 'freq_4', 'freq_5', '7.0%', + '14.1%', '21.3%', '28.5%', '35.6%', '42.8%', '50.0%', '57.1%', '64.3%', + '71.5%', '78.6%', '85.8%', '93.0%'] + assert_equal(res.frame.index.tolist(), stats) def test_large_ntop(df):
BUG: fix percentiles being dropped in Description frame. - [x] closes #7316 - [x] tests added / passed. - [x] code/documentation is well formatted. - [x] properly formatted commit message @bashtage I am not very familiar with this function, so if you have a moment to check if I did something sensible, that would be great!
"2021-09-09T09:35:56Z"
0.12
[ "statsmodels/stats/tests/test_descriptivestats.py::TestSimpleTable::test_basic_2", "statsmodels/stats/tests/test_descriptivestats.py::TestSimpleTable::test_basic_4", "statsmodels/stats/tests/test_descriptivestats.py::TestSimpleTable::test_describe_summary_float_ndarray", "statsmodels/stats/tests/test_descriptivestats.py::TestSimpleTable::test_basic_2a", "statsmodels/stats/tests/test_descriptivestats.py::TestSimpleTable::test_basic_4a", "statsmodels/stats/tests/test_descriptivestats.py::TestSimpleTable::test_basic_3", "statsmodels/stats/tests/test_descriptivestats.py::test_sign_test", "statsmodels/stats/tests/test_descriptivestats.py::test_description_basic", "statsmodels/stats/tests/test_descriptivestats.py::test_use_t", "statsmodels/stats/tests/test_descriptivestats.py::test_special_stats[top]", "statsmodels/stats/tests/test_descriptivestats.py::test_describe", "statsmodels/stats/tests/test_descriptivestats.py::test_special_stats[freq]", "statsmodels/stats/tests/test_descriptivestats.py::test_extension_types", "statsmodels/stats/tests/test_descriptivestats.py::test_special_stats[mode]", "statsmodels/stats/tests/test_descriptivestats.py::test_large_ntop", "statsmodels/stats/tests/test_descriptivestats.py::test_special_stats[jarque_bera]", "statsmodels/stats/tests/test_descriptivestats.py::test_special_stats[ci]", "statsmodels/stats/tests/test_descriptivestats.py::test_description_exceptions", "statsmodels/stats/tests/test_descriptivestats.py::test_empty_columns" ]
[ "statsmodels/stats/tests/test_descriptivestats.py::test_odd_percentiles" ]
Python
[]
[]
statsmodels/statsmodels
7,713
statsmodels__statsmodels-7713
[ "6095" ]
9245093d15f2eca00c74cb45a68836948a8ff17a
diff --git a/statsmodels/stats/oaxaca.py b/statsmodels/stats/oaxaca.py --- a/statsmodels/stats/oaxaca.py +++ b/statsmodels/stats/oaxaca.py @@ -1,6 +1,6 @@ -# TODO Variance can be calculated for the three_fold -# TODO Group Size Effects can be accounted for -# TODO Non-Linear Oaxaca-Blinder can be used +# TODO Non-Linear Regressions can be used +# TODO Further decomposition of the two_fold parameters i.e. +# the delta method for further two_fold detail """ Author: Austin Adams @@ -8,7 +8,7 @@ a OaxacaResults Class: OaxacaBlinder: -Two-Fold/Pooled (two_fold) +Two-Fold (two_fold) Three-Fold (three_fold) OaxacaResults: @@ -44,10 +44,12 @@ A. S. Blinder "Wage Discrimination: Reduced Form and Structural Estimates," The Journal of Human Resources, 1973. """ +from textwrap import dedent + +import numpy as np + from statsmodels.regression.linear_model import OLS from statsmodels.tools.tools import add_constant -import numpy as np -from textwrap import dedent class OaxacaBlinder(object): @@ -108,21 +110,37 @@ class OaxacaBlinder(object): >>> model.three_fold().summary() Oaxaca-Blinder Three-fold Effects - Characteristic Effect: 321.74824 + Endowments Effect: 321.74824 Coefficient Effect: 75.45371 Interaction Effect: -238.45151 Gap: 158.75044 """ - def __init__(self, endog, exog, bifurcate, hasconst=True, - swap=True, cov_type='nonrobust', cov_kwds=None): - if str(type(exog)).find('pandas') != -1: + def __init__( + self, + endog, + exog, + bifurcate, + hasconst=True, + swap=True, + cov_type="nonrobust", + cov_kwds=None, + ): + if str(type(exog)).find("pandas") != -1: bifurcate = exog.columns.get_loc(bifurcate) endog, exog = np.array(endog), np.array(exog) + self.two_fold_type = None + self.bifurcate = bifurcate + self.cov_type = cov_type + self.cov_kwds = cov_kwds + self.neumark = np.delete(exog, bifurcate, axis=1) + self.exog = exog + self.hasconst = hasconst bi_col = exog[:, bifurcate] endog = np.column_stack((bi_col, endog)) bi = np.unique(bi_col) + self.bi_col = bi_col # 
split the data along the bifurcate axis, the issue is you need to # delete it after you fit the model for the total model. @@ -134,72 +152,312 @@ def __init__(self, endog, exog, bifurcate, hasconst=True, exog_s = np.delete(exog_s, bifurcate, axis=1) endog_f = endog_f[:, 1] endog_s = endog_s[:, 1] - endog = endog[:, 1] + self.endog = endog[:, 1] + self.len_f, self.len_s = len(endog_f), len(endog_s) self.gap = endog_f.mean() - endog_s.mean() if swap and self.gap < 0: endog_f, endog_s = endog_s, endog_f exog_f, exog_s = exog_s, exog_f self.gap = endog_f.mean() - endog_s.mean() + bi[0], bi[1] = bi[1], bi[0] + + self.bi = bi if hasconst is False: exog_f = add_constant(exog_f, prepend=False) exog_s = add_constant(exog_s, prepend=False) - exog = add_constant(exog, prepend=False) - - self._t_model = OLS(endog, exog).fit( - cov_type=cov_type, - cov_kwds=cov_kwds) - self._f_model = OLS(endog_f, exog_f).fit( - cov_type=cov_type, - cov_kwds=cov_kwds) - self._s_model = OLS(endog_s, exog_s).fit( - cov_type=cov_type, - cov_kwds=cov_kwds) + self.exog = add_constant(self.exog, prepend=False) + self.neumark = add_constant(self.neumark, prepend=False) self.exog_f_mean = np.mean(exog_f, axis=0) self.exog_s_mean = np.mean(exog_s, axis=0) - self.t_params = np.delete(self._t_model.params, bifurcate) - def three_fold(self): + self._f_model = OLS(endog_f, exog_f).fit( + cov_type=cov_type, cov_kwds=cov_kwds + ) + self._s_model = OLS(endog_s, exog_s).fit( + cov_type=cov_type, cov_kwds=cov_kwds + ) + + def variance(self, decomp_type, n=5000, conf=0.99): + """ + A helper function to calculate the variance/std. 
Used to keep + the decomposition functions cleaner + """ + if self.submitted_n is not None: + n = self.submitted_n + if self.submitted_conf is not None: + conf = self.submitted_conf + if self.submitted_weight is not None: + submitted_weight = [ + self.submitted_weight, + 1 - self.submitted_weight, + ] + bi = self.bi + bifurcate = self.bifurcate + endow_eff_list = [] + coef_eff_list = [] + int_eff_list = [] + exp_eff_list = [] + unexp_eff_list = [] + for _ in range(0, n): + endog = np.column_stack((self.bi_col, self.endog)) + exog = self.exog + amount = len(endog) + + samples = np.random.randint(0, high=amount, size=amount) + endog = endog[samples] + exog = exog[samples] + neumark = np.delete(exog, bifurcate, axis=1) + + exog_f = exog[np.where(exog[:, bifurcate] == bi[0])] + exog_s = exog[np.where(exog[:, bifurcate] == bi[1])] + endog_f = endog[np.where(endog[:, 0] == bi[0])] + endog_s = endog[np.where(endog[:, 0] == bi[1])] + exog_f = np.delete(exog_f, bifurcate, axis=1) + exog_s = np.delete(exog_s, bifurcate, axis=1) + endog_f = endog_f[:, 1] + endog_s = endog_s[:, 1] + endog = endog[:, 1] + + two_fold_type = self.two_fold_type + + if self.hasconst is False: + exog_f = add_constant(exog_f, prepend=False) + exog_s = add_constant(exog_s, prepend=False) + exog = add_constant(exog, prepend=False) + neumark = add_constant(neumark, prepend=False) + + _f_model = OLS(endog_f, exog_f).fit( + cov_type=self.cov_type, cov_kwds=self.cov_kwds + ) + _s_model = OLS(endog_s, exog_s).fit( + cov_type=self.cov_type, cov_kwds=self.cov_kwds + ) + exog_f_mean = np.mean(exog_f, axis=0) + exog_s_mean = np.mean(exog_s, axis=0) + + if decomp_type == 3: + endow_eff = (exog_f_mean - exog_s_mean) @ _s_model.params + coef_eff = exog_s_mean @ (_f_model.params - _s_model.params) + int_eff = (exog_f_mean - exog_s_mean) @ ( + _f_model.params - _s_model.params + ) + + endow_eff_list.append(endow_eff) + coef_eff_list.append(coef_eff) + int_eff_list.append(int_eff) + + elif decomp_type == 2: + len_f = 
len(exog_f) + len_s = len(exog_s) + + if two_fold_type == "cotton": + t_params = (len_f / (len_f + len_s) * _f_model.params) + ( + len_s / (len_f + len_s) * _s_model.params + ) + + elif two_fold_type == "reimers": + t_params = 0.5 * (_f_model.params + _s_model.params) + + elif two_fold_type == "self_submitted": + t_params = ( + submitted_weight[0] * _f_model.params + + submitted_weight[1] * _s_model.params + ) + + elif two_fold_type == "nuemark": + _t_model = OLS(endog, neumark).fit( + cov_type=self.cov_type, cov_kwds=self.cov_kwds + ) + t_params = _t_model.params + + else: + _t_model = OLS(endog, exog).fit( + cov_type=self.cov_type, cov_kwds=self.cov_kwds + ) + t_params = np.delete(_t_model.params, bifurcate) + + unexplained = (exog_f_mean @ (_f_model.params - t_params)) + ( + exog_s_mean @ (t_params - _s_model.params) + ) + + explained = (exog_f_mean - exog_s_mean) @ t_params + + unexp_eff_list.append(unexplained) + exp_eff_list.append(explained) + + high, low = int(n * conf), int(n * (1 - conf)) + if decomp_type == 3: + return [ + np.std(np.sort(endow_eff_list)[low:high]), + np.std(np.sort(coef_eff_list)[low:high]), + np.std(np.sort(int_eff_list)[low:high]), + ] + elif decomp_type == 2: + return [ + np.std(np.sort(unexp_eff_list)[low:high]), + np.std(np.sort(exp_eff_list)[low:high]), + ] + + def three_fold(self, std=False, n=None, conf=None): """ Calculates the three-fold Oaxaca Blinder Decompositions + Parameters + ---------- + std: boolean, optional + If true, bootstrapped standard errors will be calculated. + n: int, optional + A amount of iterations to calculate the bootstrapped + standard errors. This defaults to 5000. + conf: float, optional + This is the confidence required for the standard error + calculation. Defaults to .99, but could be anything less + than or equal to one. One is heavy discouraged, due to the + extreme outliers inflating the variance. + Returns ------- OaxacaResults A results container for the three-fold decomposition. 
""" - - self.char_eff = ( - (self.exog_f_mean - self.exog_s_mean) - @ self._s_model.params) - self.coef_eff = self.exog_s_mean @ (self._f_model.params - - self._s_model.params) - self.int_eff = ((self.exog_f_mean - self.exog_s_mean) - @ (self._f_model.params - self._s_model.params)) + self.submitted_n = n + self.submitted_conf = conf + self.submitted_weight = None + std_val = None + self.endow_eff = ( + self.exog_f_mean - self.exog_s_mean + ) @ self._s_model.params + self.coef_eff = self.exog_s_mean @ ( + self._f_model.params - self._s_model.params + ) + self.int_eff = (self.exog_f_mean - self.exog_s_mean) @ ( + self._f_model.params - self._s_model.params + ) + + if std is True: + std_val = self.variance(3) return OaxacaResults( - (self.char_eff, self.coef_eff, - self.int_eff, self.gap), 3) - - def two_fold(self): + (self.endow_eff, self.coef_eff, self.int_eff, self.gap), + 3, + std_val=std_val, + ) + + def two_fold( + self, + std=False, + two_fold_type="pooled", + submitted_weight=None, + n=None, + conf=None, + ): """ Calculates the two-fold or pooled Oaxaca Blinder Decompositions + Methods + ------- + std: boolean, optional + If true, bootstrapped standard errors will be calculated. + + two_fold_type: string, optional + This method allows for the specific calculation of the + non-discriminatory model. There are four different types + available at this time. pooled, cotton, reimers, self_submitted. + Pooled is assumed and if a non-viable parameter is given, + pooled will be ran. + + pooled - This type assumes that the pooled model's parameters + (a normal regression) is the non-discriminatory model. + This includes the indicator variable. This is generally + the best idea. If you have economic justification for + using others, then use others. + + nuemark - This is similar to the pooled type, but the regression + is not done including the indicator variable. 
+ + cotton - This type uses the adjusted in Cotton (1988), which + accounts for the undervaluation of one group causing the + overevalution of another. It uses the sample size weights for + a linear combination of the two model parameters + + reimers - This type uses a linear combination of the two + models with both parameters being 50% of the + non-discriminatory model. + + self_submitted - This allows the user to submit their + own weights. Please be sure to put the weight of the larger mean + group only. This should be submitted in the + submitted_weights variable. + + submitted_weight: int/float, required only for self_submitted, + This is the submitted weight for the larger mean. If the + weight for the larger mean is p, then the weight for the + other mean is 1-p. Only submit the first value. + + n: int, optional + A amount of iterations to calculate the bootstrapped + standard errors. This defaults to 5000. + conf: float, optional + This is the confidence required for the standard error + calculation. Defaults to .99, but could be anything less + than or equal to one. One is heavy discouraged, due to the + extreme outliers inflating the variance. + Returns ------- OaxacaResults A results container for the two-fold decomposition. 
""" - self.unexplained = ((self.exog_f_mean - @ (self._f_model.params - self.t_params)) - + (self.exog_s_mean - @ (self.t_params - self._s_model.params))) + self.submitted_n = n + self.submitted_conf = conf + std_val = None + self.two_fold_type = two_fold_type + self.submitted_weight = submitted_weight + + if two_fold_type == "cotton": + self.t_params = ( + self.len_f / (self.len_f + self.len_s) * self._f_model.params + ) + (self.len_s / (self.len_f + self.len_s) * self._s_model.params) + + elif two_fold_type == "reimers": + self.t_params = 0.5 * (self._f_model.params + self._s_model.params) + + elif two_fold_type == "self_submitted": + if submitted_weight is None: + raise ValueError("Please submit weights") + submitted_weight = [submitted_weight, 1 - submitted_weight] + self.t_params = ( + submitted_weight[0] * self._f_model.params + + submitted_weight[1] * self._s_model.params + ) + + elif two_fold_type == "nuemark": + self._t_model = OLS(self.endog, self.neumark).fit( + cov_type=self.cov_type, cov_kwds=self.cov_kwds + ) + self.t_params = self._t_model.params + + else: + self._t_model = OLS(self.endog, self.exog).fit( + cov_type=self.cov_type, cov_kwds=self.cov_kwds + ) + self.t_params = np.delete(self._t_model.params, self.bifurcate) + + self.unexplained = ( + self.exog_f_mean @ (self._f_model.params - self.t_params) + ) + (self.exog_s_mean @ (self.t_params - self._s_model.params)) self.explained = (self.exog_f_mean - self.exog_s_mean) @ self.t_params - return OaxacaResults((self.unexplained, self.explained, self.gap), 2) + if std is True: + std_val = self.variance(2) + + return OaxacaResults( + (self.unexplained, self.explained, self.gap), 2, std_val=std_val + ) class OaxacaResults: @@ -208,11 +466,14 @@ class OaxacaResults: Use .summary() to get a table of the fitted values or use .params to receive a list of the values + use .std to receive a list of the standard errors If a two-fold model was fitted, this will return unexplained effect, explained effect, and 
the - mean gap. The list will be of the following order - and type. + mean gap. The list will always be of the following order + and type. If standard error was asked for, then standard error + calculations will also be included for each variable after each + calculated effect. unexplained : float This is the effect that cannot be explained by the data at hand. @@ -225,9 +486,11 @@ class OaxacaResults: If a three-fold model was fitted, this will return characteristic effect, coefficient effect interaction effect, and the mean gap. The list will - be of the following order and type. + be of the following order and type. If standard error was asked + for, then standard error calculations will also be included for + each variable after each calculated effect. - characteristic effect : float + endowment effect : float This is the effect due to the group differences in predictors coefficient effect : float @@ -243,9 +506,13 @@ class OaxacaResults: ---------- params A list of all values for the fitted models. + std + A list of standard error calculations. 
""" - def __init__(self, results, model_type): + + def __init__(self, results, model_type, std_val=None): self.params = results + self.std = std_val self.model_type = model_type def summary(self): @@ -253,22 +520,57 @@ def summary(self): Print a summary table with the Oaxaca-Blinder effects """ if self.model_type == 2: - print(dedent("""\ - Oaxaca-Blinder Two-fold Effects - - Unexplained Effect: {:.5f} - Explained Effect: {:.5f} - Gap: {:.5f}""".format( - self.params[0], self.params[1], - self.params[2]))) - + if self.std is None: + print( + dedent( + f"""\ + Oaxaca-Blinder Two-fold Effects + Unexplained Effect: {self.params[0]:.5f} + Explained Effect: {self.params[1]:.5f} + Gap: {self.params[2]:.5f}""" + ) + ) + else: + print( + dedent( + """\ + Oaxaca-Blinder Two-fold Effects + Unexplained Effect: {:.5f} + Unexplained Standard Error: {:.5f} + Explained Effect: {:.5f} + Explained Standard Error: {:.5f} + Gap: {:.5f}""".format( + self.params[0], + self.std[0], + self.params[1], + self.std[1], + self.params[2], + ) + ) + ) if self.model_type == 3: - print(dedent("""\ - Oaxaca-Blinder Three-fold Effects - - Characteristic Effect: {:.5f} - Coefficient Effect: {:.5f} - Interaction Effect: {:.5f} - Gap: {:.5f}""".format( - self.params[0], self.params[1], - self.params[2], self.params[3]))) + if self.std is None: + print( + dedent( + f"""\ + Oaxaca-Blinder Three-fold Effects + Endowment Effect: {self.params[0]:.5f} + Coefficient Effect: {self.params[1]:.5f} + Interaction Effect: {self.params[2]:.5f} + Gap: {self.params[3]:.5f}""" + ) + ) + else: + print( + dedent( + f"""\ + Oaxaca-Blinder Three-fold Effects + Endowment Effect: {self.params[0]:.5f} + Endowment Standard Error: {self.std[0]:.5f} + Coefficient Effect: {self.params[1]:.5f} + Coefficient Standard Error: {self.std[1]:.5f} + Interaction Effect: {self.params[2]:.5f} + Interaction Standard Error: {self.std[2]:.5f} + Gap: {self.params[3]:.5f}""" + ) + )
diff --git a/statsmodels/stats/tests/test_oaxaca.py b/statsmodels/stats/tests/test_oaxaca.py --- a/statsmodels/stats/tests/test_oaxaca.py +++ b/statsmodels/stats/tests/test_oaxaca.py @@ -3,6 +3,9 @@ # so I cannot test for having no intercept. This also would make # no sense for Oaxaca. All of these stata_results # are from using the oaxaca command in STATA. +# Variance from STATA is bootstrapped. Sometimes STATA +# does not converge correctly, so mulitple iterations +# must be done. import numpy as np @@ -14,7 +17,8 @@ endog = pandas_df.endog.values exog = add_constant(pandas_df.exog.values, prepend=False) pd_endog, pd_exog = pandas_df.endog, add_constant( - pandas_df.exog, prepend=False) + pandas_df.exog, prepend=False +) class TestOaxaca(object): @@ -23,12 +27,15 @@ def setup_class(cls): cls.model = OaxacaBlinder(endog, exog, 3) def test_results(self): + np.random.seed(0) stata_results = np.array([158.7504, 321.7482, 75.45371, -238.4515]) stata_results_pooled = np.array([158.7504, 130.8095, 27.94091]) - char, coef, inter, gap = self.model.three_fold().params + stata_results_std = np.array([653.10389, 64.584796, 655.0323717]) + endow, coef, inter, gap = self.model.three_fold().params unexp, exp, gap = self.model.two_fold().params + endow_var, coef_var, inter_var = self.model.three_fold(True).std np.testing.assert_almost_equal(gap, stata_results[0], 3) - np.testing.assert_almost_equal(char, stata_results[1], 3) + np.testing.assert_almost_equal(endow, stata_results[1], 3) np.testing.assert_almost_equal(coef, stata_results[2], 3) np.testing.assert_almost_equal(inter, stata_results[3], 3) @@ -36,6 +43,10 @@ def test_results(self): np.testing.assert_almost_equal(exp, stata_results_pooled[1], 3) np.testing.assert_almost_equal(unexp, stata_results_pooled[2], 3) + np.testing.assert_almost_equal(endow_var, stata_results_std[0], 3) + np.testing.assert_almost_equal(coef_var, stata_results_std[1], 3) + np.testing.assert_almost_equal(inter_var, stata_results_std[2], 3) + class 
TestOaxacaNoSwap(object): @classmethod @@ -45,10 +56,10 @@ def setup_class(cls): def test_results(self): stata_results = np.array([-158.7504, -83.29674, 162.9978, -238.4515]) stata_results_pooled = np.array([-158.7504, -130.8095, -27.94091]) - char, coef, inter, gap = self.model.three_fold().params + endow, coef, inter, gap = self.model.three_fold().params unexp, exp, gap = self.model.two_fold().params np.testing.assert_almost_equal(gap, stata_results[0], 3) - np.testing.assert_almost_equal(char, stata_results[1], 3) + np.testing.assert_almost_equal(endow, stata_results[1], 3) np.testing.assert_almost_equal(coef, stata_results[2], 3) np.testing.assert_almost_equal(inter, stata_results[3], 3) @@ -60,15 +71,15 @@ def test_results(self): class TestOaxacaPandas(object): @classmethod def setup_class(cls): - cls.model = OaxacaBlinder(pd_endog, pd_exog, 'OWNRENT') + cls.model = OaxacaBlinder(pd_endog, pd_exog, "OWNRENT") def test_results(self): stata_results = np.array([158.7504, 321.7482, 75.45371, -238.4515]) stata_results_pooled = np.array([158.7504, 130.8095, 27.94091]) - char, coef, inter, gap = self.model.three_fold().params + endow, coef, inter, gap = self.model.three_fold().params unexp, exp, gap = self.model.two_fold().params np.testing.assert_almost_equal(gap, stata_results[0], 3) - np.testing.assert_almost_equal(char, stata_results[1], 3) + np.testing.assert_almost_equal(endow, stata_results[1], 3) np.testing.assert_almost_equal(coef, stata_results[2], 3) np.testing.assert_almost_equal(inter, stata_results[3], 3) @@ -80,15 +91,15 @@ def test_results(self): class TestOaxacaPandasNoSwap(object): @classmethod def setup_class(cls): - cls.model = OaxacaBlinder(pd_endog, pd_exog, 'OWNRENT', swap=False) + cls.model = OaxacaBlinder(pd_endog, pd_exog, "OWNRENT", swap=False) def test_results(self): stata_results = np.array([-158.7504, -83.29674, 162.9978, -238.4515]) stata_results_pooled = np.array([-158.7504, -130.8095, -27.94091]) - char, coef, inter, gap = 
self.model.three_fold().params + endow, coef, inter, gap = self.model.three_fold().params unexp, exp, gap = self.model.two_fold().params np.testing.assert_almost_equal(gap, stata_results[0], 3) - np.testing.assert_almost_equal(char, stata_results[1], 3) + np.testing.assert_almost_equal(endow, stata_results[1], 3) np.testing.assert_almost_equal(coef, stata_results[2], 3) np.testing.assert_almost_equal(inter, stata_results[3], 3) @@ -100,17 +111,17 @@ def test_results(self): class TestOaxacaNoConstPassed(object): @classmethod def setup_class(cls): - cls.model = OaxacaBlinder(pandas_df.endog.values, - pandas_df.exog.values, - 3, hasconst=False) + cls.model = OaxacaBlinder( + pandas_df.endog.values, pandas_df.exog.values, 3, hasconst=False + ) def test_results(self): stata_results = np.array([158.7504, 321.7482, 75.45371, -238.4515]) stata_results_pooled = np.array([158.7504, 130.8095, 27.94091]) - char, coef, inter, gap = self.model.three_fold().params + endow, coef, inter, gap = self.model.three_fold().params unexp, exp, gap = self.model.two_fold().params np.testing.assert_almost_equal(gap, stata_results[0], 3) - np.testing.assert_almost_equal(char, stata_results[1], 3) + np.testing.assert_almost_equal(endow, stata_results[1], 3) np.testing.assert_almost_equal(coef, stata_results[2], 3) np.testing.assert_almost_equal(inter, stata_results[3], 3) @@ -122,17 +133,21 @@ def test_results(self): class TestOaxacaNoSwapNoConstPassed(object): @classmethod def setup_class(cls): - cls.model = OaxacaBlinder(pandas_df.endog.values, - pandas_df.exog.values, - 3, hasconst=False, swap=False) + cls.model = OaxacaBlinder( + pandas_df.endog.values, + pandas_df.exog.values, + 3, + hasconst=False, + swap=False, + ) def test_results(self): stata_results = np.array([-158.7504, -83.29674, 162.9978, -238.4515]) stata_results_pooled = np.array([-158.7504, -130.8095, -27.94091]) - char, coef, inter, gap = self.model.three_fold().params + endow, coef, inter, gap = self.model.three_fold().params 
unexp, exp, gap = self.model.two_fold().params np.testing.assert_almost_equal(gap, stata_results[0], 3) - np.testing.assert_almost_equal(char, stata_results[1], 3) + np.testing.assert_almost_equal(endow, stata_results[1], 3) np.testing.assert_almost_equal(coef, stata_results[2], 3) np.testing.assert_almost_equal(inter, stata_results[3], 3) @@ -144,17 +159,17 @@ def test_results(self): class TestOaxacaPandasNoConstPassed(object): @classmethod def setup_class(cls): - cls.model = OaxacaBlinder(pandas_df.endog, - pandas_df.exog, - 'OWNRENT', hasconst=False) + cls.model = OaxacaBlinder( + pandas_df.endog, pandas_df.exog, "OWNRENT", hasconst=False + ) def test_results(self): stata_results = np.array([158.7504, 321.7482, 75.45371, -238.4515]) stata_results_pooled = np.array([158.7504, 130.8095, 27.94091]) - char, coef, inter, gap = self.model.three_fold().params + endow, coef, inter, gap = self.model.three_fold().params unexp, exp, gap = self.model.two_fold().params np.testing.assert_almost_equal(gap, stata_results[0], 3) - np.testing.assert_almost_equal(char, stata_results[1], 3) + np.testing.assert_almost_equal(endow, stata_results[1], 3) np.testing.assert_almost_equal(coef, stata_results[2], 3) np.testing.assert_almost_equal(inter, stata_results[3], 3) @@ -166,19 +181,114 @@ def test_results(self): class TestOaxacaPandasNoSwapNoConstPassed(object): @classmethod def setup_class(cls): - cls.model = OaxacaBlinder(pandas_df.endog, pandas_df.exog, - 'OWNRENT', hasconst=False, swap=False) + cls.model = OaxacaBlinder( + pandas_df.endog, + pandas_df.exog, + "OWNRENT", + hasconst=False, + swap=False, + ) def test_results(self): stata_results = np.array([-158.7504, -83.29674, 162.9978, -238.4515]) stata_results_pooled = np.array([-158.7504, -130.8095, -27.94091]) - char, coef, inter, gap = self.model.three_fold().params + endow, coef, inter, gap = self.model.three_fold().params unexp, exp, gap = self.model.two_fold().params np.testing.assert_almost_equal(gap, stata_results[0], 
3) - np.testing.assert_almost_equal(char, stata_results[1], 3) + np.testing.assert_almost_equal(endow, stata_results[1], 3) np.testing.assert_almost_equal(coef, stata_results[2], 3) np.testing.assert_almost_equal(inter, stata_results[3], 3) np.testing.assert_almost_equal(gap, stata_results_pooled[0], 3) np.testing.assert_almost_equal(exp, stata_results_pooled[1], 3) np.testing.assert_almost_equal(unexp, stata_results_pooled[2], 3) + + +class TestOneModel(object): + @classmethod + def setup_class(cls): + np.random.seed(0) + cls.one_model = OaxacaBlinder( + pandas_df.endog, pandas_df.exog, "OWNRENT", hasconst=False + ).two_fold(True, two_fold_type="self_submitted", submitted_weight=1) + + def test_results(self): + unexp, exp, gap = self.one_model.params + unexp_std, exp_std = self.one_model.std + one_params_stata_results = np.array([75.45370, 83.29673, 158.75044]) + one_std_stata_results = np.array([64.58479, 71.05619]) + + np.testing.assert_almost_equal(unexp, one_params_stata_results[0], 3) + np.testing.assert_almost_equal(exp, one_params_stata_results[1], 3) + np.testing.assert_almost_equal(gap, one_params_stata_results[2], 3) + + np.testing.assert_almost_equal(unexp_std, one_std_stata_results[0], 3) + np.testing.assert_almost_equal(exp_std, one_std_stata_results[1], 3) + + +class TestZeroModel(object): + @classmethod + def setup_class(cls): + np.random.seed(0) + cls.zero_model = OaxacaBlinder( + pandas_df.endog, pandas_df.exog, "OWNRENT", hasconst=False + ).two_fold(True, two_fold_type="self_submitted", submitted_weight=0) + + def test_results(self): + unexp, exp, gap = self.zero_model.params + unexp_std, exp_std = self.zero_model.std + zero_params_stata_results = np.array([-162.9978, 321.7482, 158.75044]) + zero_std_stata_results = np.array([668.1512, 653.10389]) + + np.testing.assert_almost_equal(unexp, zero_params_stata_results[0], 3) + np.testing.assert_almost_equal(exp, zero_params_stata_results[1], 3) + np.testing.assert_almost_equal(gap, 
zero_params_stata_results[2], 3) + + np.testing.assert_almost_equal(unexp_std, zero_std_stata_results[0], 3) + np.testing.assert_almost_equal(exp_std, zero_std_stata_results[1], 3) + + +class TestOmegaModel(object): + @classmethod + def setup_class(cls): + np.random.seed(0) + cls.omega_model = OaxacaBlinder( + pandas_df.endog, pandas_df.exog, "OWNRENT", hasconst=False + ).two_fold(True, two_fold_type="nuemark") + + def test_results(self): + unexp, exp, gap = self.omega_model.params + unexp_std, exp_std = self.omega_model.std + nue_params_stata_results = np.array([19.52467, 139.22577, 158.75044]) + nue_std_stata_results = np.array([59.82744, 48.25425]) + + np.testing.assert_almost_equal(unexp, nue_params_stata_results[0], 3) + np.testing.assert_almost_equal(exp, nue_params_stata_results[1], 3) + np.testing.assert_almost_equal(gap, nue_params_stata_results[2], 3) + + np.testing.assert_almost_equal(unexp_std, nue_std_stata_results[0], 3) + np.testing.assert_almost_equal(exp_std, nue_std_stata_results[1], 3) + + +class TestPooledModel(object): + @classmethod + def setup_class(cls): + np.random.seed(0) + cls.pooled_model = OaxacaBlinder( + pandas_df.endog, pandas_df.exog, "OWNRENT", hasconst=False + ).two_fold(True) + + def test_results(self): + unexp, exp, gap = self.pooled_model.params + unexp_std, exp_std = self.pooled_model.std + pool_params_stata_results = np.array( + [27.940908, 130.809536, 158.75044] + ) + pool_std_stata_results = np.array([89.209487, 58.612367]) + + np.testing.assert_almost_equal(unexp, pool_params_stata_results[0], 3) + np.testing.assert_almost_equal(exp, pool_params_stata_results[1], 3) + np.testing.assert_almost_equal(gap, pool_params_stata_results[2], 3) + + np.testing.assert_almost_equal(unexp_std, pool_std_stata_results[0], 3) + np.testing.assert_almost_equal(exp_std, pool_std_stata_results[1], 3)
WIP: Oaxaca Variance/Other Models Hi everyone. I should have added more ability for the end user to choose what type of non-discriminatory model they would like. It used to be a pooled only model, but now users can pass into two_fold() optionally what type of non-discriminatory model they wish to. I have checked these results with STATA and they are the same. I haven't written tests for them, but I will. The real issue is that I cannot seem to get the variance/standard error calculations to work. I found what the R and Stata command cite as their method for calculating the variance, but I can't get the same numbers. This is what they cite: http://repec.org/dsug2005/oaxaca_se_handout.pdf. I would appreciate some help to figure out where I am going wrong. Var of the three_fold is much easier than the two_fold, so once the three_fold is figured out, I can move onto the two_fold. The output can be seen in the results by calling the .var on the results class. This calculation is the variance instead of the standard error. I could also bootstrap the standard errors like the R command does, but it takes a while.
"2021-09-09T14:56:29Z"
0.12
[ "statsmodels/stats/tests/test_oaxaca.py::TestOaxacaNoSwap::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestOaxacaPandasNoSwapNoConstPassed::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestOaxacaNoConstPassed::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestOaxacaPandas::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestOaxacaPandasNoConstPassed::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestOaxacaPandasNoSwap::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestOaxacaNoSwapNoConstPassed::test_results" ]
[ "statsmodels/stats/tests/test_oaxaca.py::TestZeroModel::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestPooledModel::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestOneModel::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestOmegaModel::test_results", "statsmodels/stats/tests/test_oaxaca.py::TestOaxaca::test_results" ]
Python
[]
[]
statsmodels/statsmodels
7,721
statsmodels__statsmodels-7721
[ "5199" ]
ba72432abcf2b36664588511a1512ed79ff01fd4
diff --git a/statsmodels/tools/validation/validation.py b/statsmodels/tools/validation/validation.py --- a/statsmodels/tools/validation/validation.py +++ b/statsmodels/tools/validation/validation.py @@ -59,7 +59,7 @@ def array_like( of obj (if present) or uses NumPy to automatically detect the dtype ndim : {int, None} Required number of dimensions of obj. If None, no check is performed. - If the numebr of dimensions of obj is less than ndim, additional axes + If the number of dimensions of obj is less than ndim, additional axes are inserted on the right. See examples. maxdim : {int, None} Maximum allowed dimension. Use ``maxdim`` instead of ``ndim`` when diff --git a/statsmodels/tsa/stattools.py b/statsmodels/tsa/stattools.py --- a/statsmodels/tsa/stattools.py +++ b/statsmodels/tsa/stattools.py @@ -15,6 +15,7 @@ import pandas as pd from scipy import stats from scipy.interpolate import interp1d +from scipy.signal import correlate from statsmodels.regression.linear_model import OLS, yule_walker from statsmodels.tools.sm_exceptions import ( @@ -397,7 +398,7 @@ def acovf(x, adjusted=False, demean=True, fft=True, missing="none", nlag=None): If True, then denominators is n-k, otherwise n. demean : bool, default True If True, then subtract the mean x from each element of x. - fft : bool, default None + fft : bool, default True If True, use FFT convolution. This method should be preferred for long time series. missing : str, default "none" @@ -586,7 +587,7 @@ def acf( qstat : bool, default False If True, returns the Ljung-Box q statistic for each autocorrelation coefficient. See q_stat for more information. - fft : bool, default None + fft : bool, default True If True, computes the ACF via FFT. 
alpha : scalar, default None If a number is given, the confidence intervals for the given level are @@ -731,8 +732,7 @@ def pacf_yw(x, nlags=None, method="adjusted"): nlags = int_like(nlags, "nlags", optional=True) nobs = x.shape[0] if nlags is None: - if nlags is None: - nlags = min(int(10 * np.log10(nobs)), nobs - 1) + nlags = min(int(10 * np.log10(nobs)), nobs - 1) method = string_like(method, "method", options=("adjusted", "mle")) pacf = [1.0] @@ -1019,7 +1019,7 @@ def pacf(x, nlags=None, method="ywadjusted", alpha=None): @deprecate_kwarg("unbiased", "adjusted") -def ccovf(x, y, adjusted=True, demean=True): +def ccovf(x, y, adjusted=True, demean=True, fft=True): """ Calculate the crosscovariance between two series. @@ -1028,24 +1028,23 @@ def ccovf(x, y, adjusted=True, demean=True): x, y : array_like The time series data to use in the calculation. adjusted : bool, optional - If True, then denominators for autocovariance is n-k, otherwise n. + If True, then denominators for crosscovariance is n-k, otherwise n. demean : bool, optional Flag indicating whether to demean x and y. + fft : bool, default True + If True, use FFT convolution. This method should be preferred + for long time series. Returns ------- ndarray The estimated crosscovariance function. - - Notes - ----- - This uses np.correlate which does full convolution. For very long time - series it is recommended to use fft convolution instead. 
""" x = array_like(x, "x") y = array_like(y, "y") adjusted = bool_like(adjusted, "adjusted") demean = bool_like(demean, "demean") + fft = bool_like(fft, "fft", optional=False) n = len(x) if demean: @@ -1055,24 +1054,28 @@ def ccovf(x, y, adjusted=True, demean=True): xo = x yo = y if adjusted: - xi = np.ones(n) - d = np.correlate(xi, xi, "full") + d = np.arange(n, 0, -1) else: d = n - return (np.correlate(xo, yo, "full") / d)[n - 1 :] + + method = "fft" if fft else "direct" + return correlate(xo, yo, "full", method=method)[n - 1:] / d @deprecate_kwarg("unbiased", "adjusted") -def ccf(x, y, adjusted=True): +def ccf(x, y, adjusted=True, fft=True): """ The cross-correlation function. Parameters ---------- x, y : array_like - The time series data to use in the calculation. + The time series data to use in the calculation. adjusted : bool - If True, then denominators for autocovariance is n-k, otherwise n. + If True, then denominators for cross-correlation is n-k, otherwise n. + fft : bool, default True + If True, use FFT convolution. This method should be preferred + for long time series. Returns ------- @@ -1081,16 +1084,14 @@ def ccf(x, y, adjusted=True): Notes ----- - This is based np.correlate which does full convolution. For very long time - series it is recommended to use fft convolution instead. - If adjusted is true, the denominator for the autocovariance is adjusted. """ x = array_like(x, "x") y = array_like(y, "y") adjusted = bool_like(adjusted, "adjusted") + fft = bool_like(fft, "fft", optional=False) - cvf = ccovf(x, y, adjusted=adjusted, demean=True) + cvf = ccovf(x, y, adjusted=adjusted, demean=True, fft=fft) return cvf / (np.std(x) * np.std(y))
diff --git a/statsmodels/tsa/tests/test_stattools.py b/statsmodels/tsa/tests/test_stattools.py --- a/statsmodels/tsa/tests/test_stattools.py +++ b/statsmodels/tsa/tests/test_stattools.py @@ -30,6 +30,7 @@ from statsmodels.tsa.stattools import ( acf, acovf, + ccovf, adfuller, arma_order_select_ic, breakvar_heteroskedasticity_test, @@ -981,8 +982,7 @@ def test_acovf2d(reset_randomstate): @pytest.mark.parametrize("demean", [True, False]) @pytest.mark.parametrize("adjusted", [True, False]) -def test_acovf_fft_vs_convolution(demean, adjusted): - np.random.seed(1) +def test_acovf_fft_vs_convolution(demean, adjusted, reset_randomstate): q = np.random.normal(size=100) F1 = acovf(q, demean=demean, adjusted=adjusted, fft=True) @@ -990,6 +990,28 @@ def test_acovf_fft_vs_convolution(demean, adjusted): assert_almost_equal(F1, F2, decimal=7) [email protected]("demean", [True, False]) [email protected]("adjusted", [True, False]) +def test_ccovf_fft_vs_convolution(demean, adjusted, reset_randomstate): + x = np.random.normal(size=128) + y = np.random.normal(size=128) + + F1 = ccovf(x, y, demean=demean, adjusted=adjusted, fft=False) + F2 = ccovf(x, y, demean=demean, adjusted=adjusted, fft=True) + assert_almost_equal(F1, F2, decimal=7) + + [email protected]("demean", [True, False]) [email protected]("adjusted", [True, False]) [email protected]("fft", [True, False]) +def test_compare_acovf_vs_ccovf(demean, adjusted, fft, reset_randomstate): + x = np.random.normal(size=128) + + F1 = acovf(x, demean=demean, adjusted=adjusted, fft=fft) + F2 = ccovf(x, x, demean=demean, adjusted=adjusted, fft=fft) + assert_almost_equal(F1, F2, decimal=7) + + @pytest.mark.smoke @pytest.mark.slow def test_arma_order_select_ic(): @@ -999,7 +1021,7 @@ def test_arma_order_select_ic(): arparams = np.array([0.75, -0.25]) maparams = np.array([0.65, 0.35]) arparams = np.r_[1, -arparams] - maparam = np.r_[1, maparams] + maparam = np.r_[1, maparams] # FIXME: Never used nobs = 250 np.random.seed(2014) y = 
arma_generate_sample(arparams, maparams, nobs)
Speed up the cross-correlation function Dears, I noticed that the computation of the cross corrlation function is extremely slower than the autocorrelation function. It seems to me that this is because of two reasons, but I am not an expert in the field: * acf function lets fix the nlags parameter and ccf does not, the latter compute every lag (commonly unneceary) * acf 'does a trick' with FFT while ccf does not. Is it possible to speed up the ccf?
It's possible to add all the same methods as in adf to ccf. I guess, without looking, that acf and ccf could share most of the code
"2021-09-12T19:07:42Z"
0.13dev
[ "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_store_str", "statsmodels/tsa/tests/test_stattools.py::TestCoint_t::test_tstat", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[two-sided-41-0.047619047619047616]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_use_chi2", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_1d_input", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_subset_length[0.5-10-0.09048484886749095]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_2d_input_with_missing_values", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[decreasing-0.024390243902439025-0.9761904761904762]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_subset_length[2-41-0.047619047619047616]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[increasing-41-0.023809523809523808]", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_confint", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_qstat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::test_coint_identical_series", "statsmodels/tsa/tests/test_stattools.py::test_coint_perfect_collinearity", "statsmodels/tsa/tests/test_stattools.py::test_coint", 
"statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_ar", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_levinson_durbin_acov", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_rtol", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf2d", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_pandasacovf", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-raise]", 
"statsmodels/tsa/tests/test_stattools.py::test_pacf_burg", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_pandas", "statsmodels/tsa/tests/test_stattools.py::test_innovations_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_filter_kalman_filter", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-raise]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_levinson_durbin", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_maxlag_too_large", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_pacf_burg_error", "statsmodels/tsa/tests/test_stattools.py::test_acf_fft_dataframe", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_short_series", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic_failure", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_error", "statsmodels/tsa/tests/test_stattools.py::test_pacf_nlags_error", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset0]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset2]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset1]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset3]", "statsmodels/tsa/tests/test_stattools.py::test_acf_conservate_nanops", "statsmodels/tsa/tests/test_stattools.py::test_coint_auto_tstat", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_gnpdef_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[aic]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_regression_type", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnp_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rand10000_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[Aic]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnpq_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_trim_value", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_array_shape", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_stkprc_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[AIC]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_autolag_type", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_finite_check", 
"statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality_single", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_zero_lag", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_qstat_none", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_drop", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_raise", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_none", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_conservative", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_yw", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols_inefficient", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ld", 
"statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_qstat", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_autolags_does_not_assign_lags_equal_to_nobs", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_unknown_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_nonvector_input", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_store", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_unclear_hypothesis", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_pval", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_legacy_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_lags", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_store", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_fail_nonvector_input", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_pval", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_teststat" ]
[ "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[False-True]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-True-True]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-True-True]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[False-False]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[True-True]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-False-False]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-True-False]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-False-True]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-False-False]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-False-True]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[True-False]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-True-False]" ]
Python
[]
[]
statsmodels/statsmodels
7,748
statsmodels__statsmodels-7748
[ "7747" ]
f78148ec4423bb7b06873a41e7f2abcd6edb511c
diff --git a/statsmodels/iolib/summary.py b/statsmodels/iolib/summary.py --- a/statsmodels/iolib/summary.py +++ b/statsmodels/iolib/summary.py @@ -718,7 +718,6 @@ def summary_return(tables, return_fmt='text'): elif return_fmt == 'latex': # TODO: insert \hline after updating SimpleTable table = copy.deepcopy(tables[0]) - del table[-1] for part in tables[1:]: table.extend(part) return table.as_latex_tabular()
diff --git a/statsmodels/discrete/tests/test_discrete.py b/statsmodels/discrete/tests/test_discrete.py --- a/statsmodels/discrete/tests/test_discrete.py +++ b/statsmodels/discrete/tests/test_discrete.py @@ -142,6 +142,16 @@ def test_jac(self): score = self.res1.model.score(self.res1.params) assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ? + def test_summary_latex(self): + # see #7747, last line of top table was dropped + summ = self.res1.summary() + ltx = summ.as_latex() + n_lines = len(ltx.splitlines()) + if not isinstance(self.res1.model, MNLogit): + # skip MNLogit which creates several params tables + assert n_lines == 19 + np.size(self.res1.params) + assert "Covariance Type:" in ltx + class CheckBinaryResults(CheckModelResults): def test_pred_table(self): diff --git a/statsmodels/regression/tests/test_regression.py b/statsmodels/regression/tests/test_regression.py --- a/statsmodels/regression/tests/test_regression.py +++ b/statsmodels/regression/tests/test_regression.py @@ -994,6 +994,7 @@ def test_summary_as_latex(): \\textbf{No. Observations:} & 16 & \\textbf{ AIC: } & 233.2 \\\\ \\textbf{Df Residuals:} & 9 & \\textbf{ BIC: } & 238.6 \\\\ \\textbf{Df Model:} & 6 & \\textbf{ } & \\\\ +\\textbf{Covariance Type:} & nonrobust & \\textbf{ } & \\\\ \\bottomrule \\end{tabular} \\begin{tabular}{lcccccc}
summary.as_latex() output missing 1 row #### Describe the bug i compared the console output summary() and summary.as_latex(), found that the last row of result table gone missing. #### Code Sample, a copy-pastable example if possible ```python print(logit.summary().as_latex()) print(logit.summary()) ``` <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `main`. If your problem has been fixed in an unreleased version, you might be able to use `main` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the main branch of this repository? It helps the limited resources if we know problems exist in the current main branch so that they do not need to check whether the code sample produces a bug in the next release. [summary() output](https://github.com/BecksIsAlreadyTaken/Troubleshooting/blob/main/Screenshot%20from%202021-09-23%2023-31-14.png) [summary().as_latex() output](https://github.com/BecksIsAlreadyTaken/Troubleshooting/blob/main/Screenshot%20from%202021-09-23%2023-31-35.png) Here the row containing "Covariance Type:" and "LLR p-value" went missing. </details> If the issue has not been resolved, please file it in the issue tracker. #### Expected Output A full result table. 
#### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> [paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line] INSTALLED VERSIONS ------------------ Python: 3.9.0.final.0 OS: Linux 5.4.0-86-generic #97-Ubuntu SMP Fri Sep 17 19:19:40 UTC 2021 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.12.2 (/home/beck/anaconda3/envs/main/lib/python3.9/site-packages/statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.21.2 (/home/beck/anaconda3/envs/main/lib/python3.9/site-packages/numpy) scipy: 1.7.1 (/home/beck/anaconda3/envs/main/lib/python3.9/site-packages/scipy) pandas: 1.3.3 (/home/beck/anaconda3/envs/main/lib/python3.9/site-packages/pandas) dateutil: 2.8.2 (/home/beck/anaconda3/envs/main/lib/python3.9/site-packages/dateutil) patsy: 0.5.1 (/home/beck/anaconda3/envs/main/lib/python3.9/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.4.3 (/home/beck/anaconda3/envs/main/lib/python3.9/site-packages/matplotlib) backend: TkAgg cvxopt: Not installed joblib: 1.0.1 (/home/beck/anaconda3/envs/main/lib/python3.9/site-packages/joblib) Developer Tools ================ IPython: Not installed jinja2: Not installed sphinx: Not installed pygments: Not installed pytest: Not installed virtualenv: Not installed </details>
~~I don't see it.~~ Both pictures show the same 6 parameter rows last line of top table is missing. It's better to post the text version than a screen shot. [image](https://github.com/BecksIsAlreadyTaken/Troubleshooting/blob/main/Screenshot%20from%202021-09-24%2000-06-52.png) missing the "Covariance Type:" and "LLR p-value" \begin{center} \begin{tabular}{lclc} \toprule \textbf{Dep. Variable:} & DEFAULT & \textbf{ No. Observations: } & 9990 \\ \textbf{Model:} & Logit & \textbf{ Df Residuals: } & 9984 \\ \textbf{Method:} & MLE & \textbf{ Df Model: } & 5 \\ \textbf{Date:} & Fri, 24 Sep 2021 & \textbf{ Pseudo R-squ.: } & 0.04225 \\ \textbf{Time:} & 00:06:07 & \textbf{ Log-Likelihood: } & -4068.5 \\ \textbf{converged:} & True & \textbf{ LL-Null: } & -4247.9 \\ \bottomrule \end{tabular} \begin{tabular}{lcccccc} & \textbf{coef} & \textbf{std err} & \textbf{z} & \textbf{P$> |$z$|$} & \textbf{[0.025} & \textbf{0.975]} \\ \midrule \textbf{HOUSE} & 0.1795 & 0.066 & 2.729 & 0.006 & 0.051 & 0.308 \\ \textbf{CAR} & -0.6911 & 0.070 & -9.815 & 0.000 & -0.829 & -0.553 \\ \textbf{HOUSE\_L} & -0.3912 & 0.085 & -4.623 & 0.000 & -0.557 & -0.225 \\ \textbf{CAR\_L} & 0.2089 & 0.124 & 1.684 & 0.092 & -0.034 & 0.452 \\ \textbf{EDUCATION} & -0.6296 & 0.029 & -21.623 & 0.000 & -0.687 & -0.573 \\ \textbf{WORKTIME} & -0.0800 & 0.022 & -3.607 & 0.000 & -0.124 & -0.037 \\ \bottomrule \end{tabular} %\caption{Logit Regression Results} \end{center} BUG, I can replicate it using one of our unit test examples. converting only the top table works correctly and includes the last line with `LLR p-value:` ``` summ = t.res1.summary() print(summ.tables[0].as_latex_tabular()) ``` Looks like there is a bug in merging the latex tables same problem with Probit. and same problem in OLS summary for latex has a del table[-1] I guess this was supposed to remove a `======` line ```def summary_return(tables, return_fmt='text'): # join table parts then print if return_fmt == 'text': .... 
elif return_fmt == 'latex': # TODO: insert \hline after updating SimpleTable table = copy.deepcopy(tables[0]) del table[-1] for part in tables[1:]: table.extend(part) return table.as_latex_tabular() ``` After a while of digging a possible candidate is https://github.com/statsmodels/statsmodels/pull/5814/files#diff-157f412d854ff1baeac4f504cb16fb5c7caf50c3f1762d6658c362cd4468d8ec replaces "=" by ":" in defining dict, but replaced "=" also from content, dec_below. reformatting doesn't give a useful diff But it looks like those mistakes are not in the fmt that are used ??? the `del table[-1]` is 10 years old from original code, so something else must have changed. Problem is that `summary` has almost only smoke tests. removing `del table[-1]` fixes the problem. But I have no guess how long this bug already occurred. @BecksIsAlreadyTaken Thanks a lot for reporting, problems in summary and plots are difficult to catch besides visual inspection. I will have a PR later today, removing the del, and with unit tests for the number of lines in the latex return, that passes for the test_discrete cases. I guess this is old, There is a unit test for as_latex in regression/test_regression that compares the full latex summary text, added 9 years ago. cov_type was added to summary 8 years ago, but that unit test never failed. The unit test fails now after fixing this. This might even have been wrong since the beginning, initial refactorings. #1214 needed to add a line to the latex text in the unit test.
"2021-09-23T19:57:03Z"
0.13dev
[ "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fittedvalues", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_converged", 
"statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_tests", "statsmodels/discrete/tests/test_discrete.py::TestSweepAlphaL1::test_sweep_alpha", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llnull", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_dev", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::test_binary_pred_table_zeros", "statsmodels/discrete/tests/test_discrete.py::test_predict_with_exposure", "statsmodels/discrete/tests/test_discrete.py::test_issue_341", "statsmodels/discrete/tests/test_discrete.py::test_isdummy", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor_categorical", "statsmodels/discrete/tests/test_discrete.py::test_non_binary", "statsmodels/discrete/tests/test_discrete.py::test_formula_missing_exposure", "statsmodels/discrete/tests/test_discrete.py::test_poisson_predict", "statsmodels/discrete/tests/test_discrete.py::test_iscount", "statsmodels/discrete/tests/test_discrete.py::test_poisson_newton", "statsmodels/discrete/tests/test_discrete.py::test_perfect_prediction", "statsmodels/discrete/tests/test_discrete.py::test_issue_339", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_float_name", "statsmodels/discrete/tests/test_discrete.py::test_null_options", "statsmodels/discrete/tests/test_discrete.py::test_cov_confint_pandas", "statsmodels/discrete/tests/test_discrete.py::test_unchanging_degrees_of_freedom", "statsmodels/discrete/tests/test_discrete.py::test_optim_kwds_prelim", "statsmodels/discrete/tests/test_discrete.py::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr_pvalue", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bse", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_df", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_dummy_overall", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_prob", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_loglikeobs", 
"statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_newton", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_mean_var", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_predict_prob", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_basic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_dev", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_loglikeobs", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pvalues", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_df", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_t", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_wald", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fittedvalues", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bic", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoissonNull::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_mean", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_k", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_conf_int", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_k", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_mean", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llnull", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmean", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_init_kwds", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_score", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_hessian", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_fit_regularized", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_t", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_conf_int", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_tests", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_cov_params", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_diagnostic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_jac", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexzero", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_df", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNull::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_init_kwds", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_fit_regularized_invalid_method", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p2", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p1", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_fit_regularized_invalid_method", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_loglikeobs", 
"statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroMNLogit::test_basic_results", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_wresid", 
"statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_params", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_demean", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_nonnested", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_homoskedastic", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_LRversion", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_nodemean", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_df_num", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_loglike", 
"statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_params", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_ess", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_rsquared", "statsmodels/regression/tests/test_regression.py::test_wls_tss", "statsmodels/regression/tests/test_regression.py::test_wls_example", "statsmodels/regression/tests/test_regression.py::test_wls_missing", "statsmodels/regression/tests/test_regression.py::test_const_indicator", "statsmodels/regression/tests/test_regression.py::test_bad_size", "statsmodels/regression/tests/test_regression.py::test_fvalue_const_only", "statsmodels/regression/tests/test_regression.py::test_conf_int_single_regressor", "statsmodels/regression/tests/test_regression.py::test_fvalue_implicit_constant", "statsmodels/regression/tests/test_regression.py::test_burg_errors", 
"statsmodels/regression/tests/test_regression.py::test_bool_regressor", "statsmodels/regression/tests/test_regression.py::test_missing_formula_predict", "statsmodels/regression/tests/test_regression.py::test_regularized_options", "statsmodels/regression/tests/test_regression.py::test_regularized_predict", "statsmodels/regression/tests/test_regression.py::test_burg", "statsmodels/regression/tests/test_regression.py::test_ridge", "statsmodels/regression/tests/test_regression.py::test_fvalue_only_constant", "statsmodels/regression/tests/test_regression.py::test_ols_constant", "statsmodels/regression/tests/test_regression.py::test_summary_no_constant", "statsmodels/regression/tests/test_regression.py::test_regularized_refit", "statsmodels/regression/tests/test_regression.py::test_formula_missing_cat", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_tvalue", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_effect", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_sd", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_bic", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_scale", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_loglike", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_degrees", 
"statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_wresid", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_aic", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_ess", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_params", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_resids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_params", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_scale", 
"statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestYuleWalker::test_params", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_bic", 
"statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_params", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_params", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_wresid", 
"statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_params", 
"statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_params_none", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_new_ftest", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_df_num", "statsmodels/regression/tests/test_regression.py::TestNonFit::test_df_resid", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_ss", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_params", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_ll", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_ss", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_ll", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_empty_model", 
"statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized_weights_list", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized_weights", "statsmodels/regression/tests/test_regression.py::TestWLS_CornerCases::test_wrong_size_weights", "statsmodels/regression/tests/test_regression.py::TestFtest::test_p", "statsmodels/regression/tests/test_regression.py::TestFtest::test_Df_num", "statsmodels/regression/tests/test_regression.py::TestFtest::test_F", "statsmodels/regression/tests/test_regression.py::TestFtest::test_Df_denom", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_params", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_norm_resids", 
"statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_scale", 
"statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestTtest::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestTtest::test_tvalue", "statsmodels/regression/tests/test_regression.py::TestTtest::test_effect", "statsmodels/regression/tests/test_regression.py::TestTtest::test_sd", "statsmodels/regression/tests/test_regression.py::TestTtest::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestTtest::test_new_tvalue", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared_adj_overfit", "statsmodels/regression/tests/test_regression.py::TestOLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestOLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestOLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC1_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC2_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_alternatives", "statsmodels/regression/tests/test_regression.py::TestOLS::test_eigenvalues", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_params", 
"statsmodels/regression/tests/test_regression.py::TestOLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_normalized_cov_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_summary_slim", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC0_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_missing", "statsmodels/regression/tests/test_regression.py::TestOLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC3_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestOLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resid_zero_variance", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestOLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestOLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestOLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_ess", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_total", 
"statsmodels/regression/tests/test_regression.py::TestNxNx::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_params", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wrong_size_sigma_2d", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_sumof_squaredresids", 
"statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wrong_size_sigma_1d", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestRTO::test_scale", "statsmodels/regression/tests/test_regression.py::TestRTO::test_resids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_wresid", "statsmodels/regression/tests/test_regression.py::TestRTO::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestRTO::test_ess", "statsmodels/regression/tests/test_regression.py::TestRTO::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestRTO::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestRTO::test_bic", "statsmodels/regression/tests/test_regression.py::TestRTO::test_fvalue", 
"statsmodels/regression/tests/test_regression.py::TestRTO::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestRTO::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestRTO::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestRTO::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_params", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestRTO::test_degrees", "statsmodels/regression/tests/test_regression.py::TestRTO::test_aic", "statsmodels/regression/tests/test_regression.py::TestRTO::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS::test_fittedvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_missing", "statsmodels/regression/tests/test_regression.py::TestGLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS::test_resid", "statsmodels/regression/tests/test_regression.py::TestGLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS::test_tvalues" ]
[ "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_summary_latex", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_summary_latex", "statsmodels/regression/tests/test_regression.py::test_summary_as_latex" ]
Python
[]
[]
statsmodels/statsmodels
7,751
statsmodels__statsmodels-7751
[ "7755" ]
4cf63aaae36581c04bb891ad49ab1ea3be76fb2f
diff --git a/statsmodels/base/_penalized.py b/statsmodels/base/_penalized.py --- a/statsmodels/base/_penalized.py +++ b/statsmodels/base/_penalized.py @@ -51,7 +51,7 @@ def __init__(self, *args, **kwds): self._init_keys.extend(['penal', 'pen_weight']) self._null_drop_keys = getattr(self, '_null_drop_keys', []) - self._null_drop_keys.extend(['penal']) + self._null_drop_keys.extend(['penal', 'pen_weight']) def _handle_scale(self, params, scale=None, **kwds): diff --git a/statsmodels/base/model.py b/statsmodels/base/model.py --- a/statsmodels/base/model.py +++ b/statsmodels/base/model.py @@ -29,6 +29,8 @@ from statsmodels.tools.tools import nan_dot, recipr from statsmodels.tools.validation import bool_like +ERROR_INIT_KWARGS = False + _model_params_doc = """Parameters ---------- endog : array_like @@ -79,6 +81,10 @@ class Model(object): # Default is 1, which is more common. Override in models when needed # Set to None to skip check _formula_max_endog = 1 + # kwargs that are generically allowed, maybe not supported in all models + _kwargs_allowed = [ + "missing", 'missing_idx', 'formula', 'design_info', "hasconst", + ] def __init__(self, endog, exog=None, **kwargs): missing = kwargs.pop('missing', 'none') @@ -106,6 +112,22 @@ def _get_init_kwds(self): return kwds + def _check_kwargs(self, kwargs, keys_extra=None, error=ERROR_INIT_KWARGS): + + kwargs_allowed = [ + "missing", 'missing_idx', 'formula', 'design_info', "hasconst", + ] + if keys_extra: + kwargs_allowed.extend(keys_extra) + + kwargs_invalid = [i for i in kwargs if i not in kwargs_allowed] + if kwargs_invalid: + msg = "unknown kwargs " + repr(kwargs_invalid) + if error is False: + warnings.warn(msg, ValueWarning) + else: + raise ValueError(msg) + def _handle_data(self, endog, exog, missing, hasconst, **kwargs): data = handle_data(endog, exog, missing, hasconst, **kwargs) # kwargs arrays could have changed, easier to just attach here diff --git a/statsmodels/discrete/discrete_model.py 
b/statsmodels/discrete/discrete_model.py --- a/statsmodels/discrete/discrete_model.py +++ b/statsmodels/discrete/discrete_model.py @@ -457,8 +457,9 @@ class BinaryModel(DiscreteModel): _continuous_ok = False def __init__(self, endog, exog, check_rank=True, **kwargs): + # unconditional check, requires no extra kwargs added by subclasses + self._check_kwargs(kwargs) super().__init__(endog, exog, check_rank, **kwargs) - if not issubclass(self.__class__, MultinomialModel): if not np.all((self.endog >= 0) & (self.endog <= 1)): raise ValueError("endog must be in the unit interval.") @@ -774,6 +775,7 @@ def _derivative_exog(self, params, exog=None, transform='dydx', class CountModel(DiscreteModel): def __init__(self, endog, exog, offset=None, exposure=None, missing='none', check_rank=True, **kwargs): + self._check_kwargs(kwargs) super().__init__(endog, exog, check_rank, missing=missing, offset=offset, exposure=exposure, **kwargs) if exposure is not None: diff --git a/statsmodels/distributions/copula/archimedean.py b/statsmodels/distributions/copula/archimedean.py --- a/statsmodels/distributions/copula/archimedean.py +++ b/statsmodels/distributions/copula/archimedean.py @@ -168,6 +168,7 @@ def pdf(self, u, args=()): return a * b ** c def logpdf(self, u, args=()): + # we skip Archimedean logpdf, that uses numdiff return super(ArchimedeanCopula, self).logpdf(u, args=args) def cdf(self, u, args=()): @@ -268,6 +269,7 @@ def logpdf(self, u, args=()): return pdf else: # for now use generic from base Copula class, log(self.pdf(...)) + # we skip Archimedean logpdf, that uses numdiff super(ArchimedeanCopula, self).logpdf(u, args) def cdfcond_2g1(self, u, args=()): @@ -383,6 +385,7 @@ def cdf(self, u, args=()): return cdf def logpdf(self, u, args=()): + # we skip Archimedean logpdf, that uses numdiff return super(ArchimedeanCopula, self).logpdf(u, args=args) def tau(self, theta=None): diff --git a/statsmodels/genmod/generalized_estimating_equations.py 
b/statsmodels/genmod/generalized_estimating_equations.py --- a/statsmodels/genmod/generalized_estimating_equations.py +++ b/statsmodels/genmod/generalized_estimating_equations.py @@ -496,9 +496,10 @@ def __init__(self, endog, exog, groups, time=None, family=None, exposure=None, dep_data=None, constraint=None, update_dep=True, weights=None, **kwargs): + if type(self) is GEE: + self._check_kwargs(kwargs) if family is not None: if not isinstance(family.link, tuple(family.safe_links)): - import warnings msg = ("The {0} link function does not respect the " "domain of the {1} family.") warnings.warn(msg.format(family.link.__class__.__name__, @@ -543,6 +544,12 @@ def __init__(self, endog, exog, groups, time=None, family=None, self._init_keys.extend(["update_dep", "constraint", "family", "cov_struct"]) + # remove keys added by super that are not supported + try: + self._init_keys.remove("freq_weights") + self._init_keys.remove("var_weights") + except ValueError: + pass # Handle the family argument if family is None: diff --git a/statsmodels/genmod/generalized_linear_model.py b/statsmodels/genmod/generalized_linear_model.py --- a/statsmodels/genmod/generalized_linear_model.py +++ b/statsmodels/genmod/generalized_linear_model.py @@ -289,14 +289,8 @@ def __init__(self, endog, exog, family=None, offset=None, exposure=None, freq_weights=None, var_weights=None, missing='none', **kwargs): - # TODO: this can be applied to many models - # 'n_trials' is specific to GLM Binomial - kwargs_allowed = ["missing", 'missing_idx', 'formula', 'design_info', - "hasconst", 'n_trials'] - kwargs_invalid = [i for i in kwargs if i not in kwargs_allowed] - if kwargs_invalid and type(self) is GLM: - warnings.warn("unknown kwargs" + repr(kwargs_invalid), - UserWarning) + if type(self) is GLM: + self._check_kwargs(kwargs, ['n_trials']) if (family is not None) and not isinstance(family.link, tuple(family.safe_links)): @@ -1665,8 +1659,10 @@ def null(self): model = self.model exog = np.ones((len(endog), 
1)) - kwargs = model._get_init_kwds() + kwargs = model._get_init_kwds().copy() kwargs.pop('family') + for key in getattr(model, '_null_drop_keys', []): + del kwargs[key] start_params = np.atleast_1d(self.family.link(endog.mean())) oe = self.model._offset_exposure if not (np.size(oe) == 1 and oe == 0): diff --git a/statsmodels/regression/linear_model.py b/statsmodels/regression/linear_model.py --- a/statsmodels/regression/linear_model.py +++ b/statsmodels/regression/linear_model.py @@ -47,7 +47,10 @@ # need import in module instead of lazily to copy `__doc__` from statsmodels.regression._prediction import PredictionResults from statsmodels.tools.decorators import cache_readonly, cache_writable -from statsmodels.tools.sm_exceptions import InvalidTestWarning +from statsmodels.tools.sm_exceptions import ( + InvalidTestWarning, + ValueWarning, + ) from statsmodels.tools.tools import pinv_extended from statsmodels.tools.validation import string_like @@ -498,6 +501,8 @@ class GLS(RegressionModel): def __init__(self, endog, exog, sigma=None, missing='none', hasconst=None, **kwargs): + if type(self) is GLS: + self._check_kwargs(kwargs) # TODO: add options igls, for iterative fgls if sigma is None # TODO: default if sigma is none should be two-step GLS sigma, cholsigmainv = _get_sigma(sigma, len(endog)) @@ -620,7 +625,14 @@ def fit_regularized(self, method="elastic_net", alpha=0., # Need to adjust since RSS/n term in elastic net uses nominal # n in denominator if self.sigma is not None: - alpha = alpha * np.sum(1 / np.diag(self.sigma)) / len(self.endog) + if self.sigma.ndim == 2: + var_obs = np.diag(self.sigma) + elif self.sigma.ndim == 1: + var_obs = self.sigma + else: + raise ValueError("sigma should be 1-dim or 2-dim") + + alpha = alpha * np.sum(1 / var_obs) / len(self.endog) rslt = OLS(self.wendog, self.wexog).fit_regularized( method=method, alpha=alpha, @@ -689,6 +701,8 @@ class WLS(RegressionModel): def __init__(self, endog, exog, weights=1., missing='none', 
hasconst=None, **kwargs): + if type(self) is WLS: + self._check_kwargs(kwargs) weights = np.array(weights) if weights.shape == (): if (missing == 'drop' and 'missing_idx' in kwargs and @@ -871,11 +885,18 @@ class OLS(WLS): def __init__(self, endog, exog=None, missing='none', hasconst=None, **kwargs): + if "weights" in kwargs: + msg = ("Weights are not supported in OLS and will be ignored" + "An exception will be raised in the next version.") + warnings.warn(msg, ValueWarning) super(OLS, self).__init__(endog, exog, missing=missing, hasconst=hasconst, **kwargs) if "weights" in self._init_keys: self._init_keys.remove("weights") + if type(self) is OLS: + self._check_kwargs(kwargs, ["offset"]) + def loglike(self, params, scale=None): """ The likelihood function for the OLS model. diff --git a/statsmodels/regression/quantile_regression.py b/statsmodels/regression/quantile_regression.py --- a/statsmodels/regression/quantile_regression.py +++ b/statsmodels/regression/quantile_regression.py @@ -75,6 +75,7 @@ class QuantReg(RegressionModel): ''' def __init__(self, endog, exog, **kwargs): + self._check_kwargs(kwargs) super(QuantReg, self).__init__(endog, exog, **kwargs) def whiten(self, data): diff --git a/statsmodels/robust/robust_linear_model.py b/statsmodels/robust/robust_linear_model.py --- a/statsmodels/robust/robust_linear_model.py +++ b/statsmodels/robust/robust_linear_model.py @@ -107,6 +107,7 @@ class RLM(base.LikelihoodModel): def __init__(self, endog, exog, M=None, missing='none', **kwargs): + self._check_kwargs(kwargs) self.M = M if M is not None else norms.HuberT() super(base.LikelihoodModel, self).__init__(endog, exog, missing=missing, **kwargs)
diff --git a/statsmodels/discrete/tests/test_discrete.py b/statsmodels/discrete/tests/test_discrete.py --- a/statsmodels/discrete/tests/test_discrete.py +++ b/statsmodels/discrete/tests/test_discrete.py @@ -45,6 +45,7 @@ ConvergenceWarning, PerfectSeparationError, SpecificationWarning, + ValueWarning, ) from .results.results_discrete import Anes, DiscreteL1, RandHIE, Spector @@ -392,12 +393,13 @@ def setup_class(cls): res2 = Spector.probit cls.res2 = res2 - @pytest.mark.xfail(reason="res2 has no predict attribute", - raises=AttributeError, strict=True) - def test_predict(self): - assert_almost_equal(self.res1.model.predict(self.res1.params), - self.res2.predict, - DECIMAL_4) + def test_init_kwargs(self): + endog = self.res1.model.endog + exog = self.res1.model.exog + z = np.ones(len(endog)) + with pytest.warns(ValueWarning, match="unknown kwargs"): + # unsupported keyword + Probit(endog, exog, weights=z) class TestProbitBFGS(CheckBinaryResults): diff --git a/statsmodels/gam/tests/test_penalized.py b/statsmodels/gam/tests/test_penalized.py --- a/statsmodels/gam/tests/test_penalized.py +++ b/statsmodels/gam/tests/test_penalized.py @@ -12,6 +12,8 @@ from numpy.testing import assert_allclose, assert_equal, assert_ import pandas as pd +import pytest + import patsy from statsmodels.discrete.discrete_model import Poisson, Logit, Probit @@ -91,6 +93,10 @@ def test_fitted(self): assert_allclose(res1.fittedvalues, res2.fitted_values, rtol=self.rtol_fitted) + @pytest.mark.smoke + def test_null_smoke(self): + self.res1.llnull + class TestTheilPLS5(CheckGAMMixin): @@ -120,6 +126,9 @@ def test_cov_robust(self): assert_allclose(np.asarray(res1.cov_params()), res2.Ve * self.covp_corrfact, rtol=1e-4) + def test_null_smoke(self): + pytest.skip("llnull not available") + class TestGLMPenalizedPLS5(CheckGAMMixin): diff --git a/statsmodels/genmod/tests/test_gee.py b/statsmodels/genmod/tests/test_gee.py --- a/statsmodels/genmod/tests/test_gee.py +++ b/statsmodels/genmod/tests/test_gee.py 
@@ -1803,7 +1803,6 @@ def test_plots(close_figures): model = gee.GEE(exog, endog, groups) result = model.fit() - fig = result.plot_added_variable(1) assert_equal(isinstance(fig, plt.Figure), True) fig = result.plot_partial_residuals(1) @@ -1988,16 +1987,16 @@ def test_quasipoisson(reg): grp = np.kron(np.arange(100), np.ones(n // 100)) model1 = gee.GEE(y, x, family=families.Poisson(), groups=grp, - cov_type="naive") + ) model2 = gee.GEE(y, x, family=families.Poisson(), groups=grp, - cov_type="naive") + ) if reg: result1 = model1.fit_regularized(pen_wt=0.1) result2 = model2.fit_regularized(pen_wt=0.1, scale="X2") else: - result1 = model1.fit() - result2 = model2.fit(scale="X2") + result1 = model1.fit(cov_type="naive") + result2 = model2.fit(scale="X2", cov_type="naive") # The parameter estimates are the same regardless of how # the scale parameter is handled diff --git a/statsmodels/genmod/tests/test_glm.py b/statsmodels/genmod/tests/test_glm.py --- a/statsmodels/genmod/tests/test_glm.py +++ b/statsmodels/genmod/tests/test_glm.py @@ -12,7 +12,6 @@ assert_array_less, assert_equal, assert_raises, - assert_warns, ) import pandas as pd from pandas.testing import assert_series_equal @@ -32,6 +31,7 @@ from statsmodels.tools.sm_exceptions import ( DomainWarning, PerfectSeparationError, + ValueWarning, ) from statsmodels.tools.tools import add_constant @@ -1736,7 +1736,8 @@ def setup_class(cls): def test_init_kwargs(self): family_link = sm.families.Gamma(sm.families.links.log()) - with assert_warns(UserWarning): + + with pytest.warns(ValueWarning, match="unknown kwargs"): GLM(self.endog, self.exog, family=family_link, weights=self.weight, # incorrect keyword ) diff --git a/statsmodels/regression/tests/test_predict.py b/statsmodels/regression/tests/test_predict.py --- a/statsmodels/regression/tests/test_predict.py +++ b/statsmodels/regression/tests/test_predict.py @@ -264,7 +264,7 @@ def test_predict_remove_data(): # GH6887 endog = [i + np.random.normal(scale=0.1) for i in 
range(100)] exog = [i for i in range(100)] - model = OLS(endog, exog, weights=[1 for _ in range(100)]).fit() + model = WLS(endog, exog, weights=[1 for _ in range(100)]).fit() # we need to compute scale before we remove wendog, wexog model.scale model.remove_data() diff --git a/statsmodels/regression/tests/test_regression.py b/statsmodels/regression/tests/test_regression.py --- a/statsmodels/regression/tests/test_regression.py +++ b/statsmodels/regression/tests/test_regression.py @@ -753,6 +753,7 @@ def setup_class(cls): cls.results = [] cls.results.append(OLS(y, x).fit()) cls.results.append(WLS(y, x, w).fit()) + # scaling weights does not change main results (except scale) cls.results.append(GLS(y, x, 100 * w).fit()) cls.results.append(GLS(y, x, np.diag(0.1 * w)).fit()) @@ -800,6 +801,7 @@ def setup_class(cls): w_inv = 1.0 / w cls.results = [] cls.results.append(WLS(y, x, w).fit()) + # scaling weights does not change main results (except scale) cls.results.append(WLS(y, x, 0.01 * w).fit()) cls.results.append(GLS(y, x, 100 * w_inv).fit()) cls.results.append(GLS(y, x, np.diag(0.1 * w_inv)).fit()) @@ -1433,17 +1435,21 @@ def test_regularized_refit(): def test_regularized_predict(): + # this also compares WLS with GLS n = 100 p = 5 np.random.seed(3132) xmat = np.random.normal(size=(n, p)) yvec = xmat.sum(1) + np.random.normal(size=n) wgt = np.random.uniform(1, 2, n) - - for klass in WLS, GLS: - model1 = klass(yvec, xmat, weights=wgt) - result1 = model1.fit_regularized(alpha=2.0, L1_wt=0.5, refit=True) - + model_wls = WLS(yvec, xmat, weights=wgt) + # TODO: params is not the same in GLS if sigma=1 / wgt, i.e 1-dim, #7755 + model_gls1 = GLS(yvec, xmat, sigma=np.diag(1 / wgt)) + model_gls2 = GLS(yvec, xmat, sigma=1 / wgt) + res = [] + for model1 in [model_wls, model_gls1, model_gls2]: + result1 = model1.fit_regularized(alpha=20.0, L1_wt=0.5, refit=True) + res.append(result1) params = result1.params fittedvalues = np.dot(xmat, params) pr = model1.predict(result1.params) @@ 
-1453,6 +1459,16 @@ def test_regularized_predict(): pr = result1.predict() assert_allclose(fittedvalues, pr) + assert_allclose(res[0].model.wendog, res[1].model.wendog, rtol=1e-10) + assert_allclose(res[0].model.wexog, res[1].model.wexog, rtol=1e-10) + assert_allclose(res[0].fittedvalues, res[1].fittedvalues, rtol=1e-10) + assert_allclose(res[0].params, res[1].params, rtol=1e-10) + + assert_allclose(res[0].model.wendog, res[2].model.wendog, rtol=1e-10) + assert_allclose(res[0].model.wexog, res[2].model.wexog, rtol=1e-10) + assert_allclose(res[0].fittedvalues, res[2].fittedvalues, rtol=1e-10) + assert_allclose(res[0].params, res[2].params, rtol=1e-10) + def test_regularized_options(): n = 100
BUG: GLS fit_regularized, different params if sigma is 1-dim or diag 2 dim I don't see the problem right now. Needs investigating. Fixing a unit test for GLS.fit_regularized that used incorrect kwarg "weights" #7751 I think the results, specifically params, should be the same if `sigma = 1/ wghts` and `sigma = np.diag(1 / wgths)` The second matches the params from the `WLS(... weights=wgths)` estimate. The former looks like an unpenalized estimate my current version form test_regression the last two asserts fail if I use `model_gls = GLS(yvec, xmat, sigma=1 / wgt) ``` def test_regularized_predict(): n = 100 p = 5 np.random.seed(3132) xmat = np.random.normal(size=(n, p)) yvec = xmat.sum(1) + np.random.normal(size=n) wgt = np.random.uniform(1, 2, n) model_wls = WLS(yvec, xmat, weights=wgt) # TODO: params is not the same in GLS if sigma=1 / wgth, i.e 1-dim model_gls = GLS(yvec, xmat, sigma=np.diag(1 / wgt)) res = [] for model1 in [model_wls, model_gls]: result1 = model1.fit_regularized(alpha=20.0, L1_wt=0.5, refit=True) res.append(result1) params = result1.params fittedvalues = np.dot(xmat, params) pr = model1.predict(result1.params) assert_allclose(fittedvalues, pr) assert_allclose(result1.fittedvalues, pr) pr = result1.predict() assert_allclose(fittedvalues, pr) assert_allclose(res[0].model.wendog, res[1].model.wendog, rtol=1e-10) assert_allclose(res[0].model.wexog, res[1].model.wexog, rtol=1e-10) assert_allclose(res[0].fittedvalues, res[1].fittedvalues, rtol=1e-10) assert_allclose(res[0].params, res[1].params, rtol=1e-10) ```
"2021-09-24T15:40:42Z"
0.13dev
[ "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_scale", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_params", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_prsquared", 
"statsmodels/genmod/tests/test_glm.py::TestStartParams::test_bic", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_resid", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_resid", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_params", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_scale", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_missing", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_Stata", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_bic", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_bse", 
"statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_endog_dtype", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog_formula", 
"statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution_binom_count", "statsmodels/genmod/tests/test_glm.py::TestRegularized::test_regularized", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_df", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_degrees", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_fittedvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_bse", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_null_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_residuals", "statsmodels/genmod/tests/test_glm.py::test_perfect_pred", "statsmodels/genmod/tests/test_glm.py::test_score_test_ols", "statsmodels/genmod/tests/test_glm.py::test_attribute_writable_resettable", "statsmodels/genmod/tests/test_glm.py::test_glm_start_params", "statsmodels/genmod/tests/test_glm.py::test_loglike_no_opt", "statsmodels/genmod/tests/test_glm.py::test_glm_irls_method", "statsmodels/genmod/tests/test_glm.py::test_summary", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls_eim", "statsmodels/genmod/tests/test_glm.py::test_formula_missing_exposure", "statsmodels/genmod/tests/test_glm.py::test_plots", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls", "statsmodels/genmod/tests/test_glm.py::test_wtd_patsy_missing", "statsmodels/genmod/tests/test_glm.py::test_glm_lasso_6431", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_upper_limit", "statsmodels/genmod/tests/test_glm.py::test_tweedie_elastic_net", "statsmodels/genmod/tests/test_glm.py::testTweediePowerEstimate", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_poisson_limit", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int32]", "statsmodels/genmod/tests/test_glm.py::test_qaic", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int8]", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int64]", "statsmodels/genmod/tests/test_glm.py::test_output_exposure_null", 
"statsmodels/genmod/tests/test_glm.py::test_int_exog[int16]", "statsmodels/genmod/tests/test_glm.py::test_glm_bic_warning", "statsmodels/genmod/tests/test_glm.py::test_glm_bic", "statsmodels/genmod/tests/test_glm.py::test_tweedie_score", "statsmodels/genmod/tests/test_glm.py::test_non_invertible_hessian_fails_summary", "statsmodels/genmod/tests/test_glm.py::test_poisson_deviance", "statsmodels/genmod/tests/test_glm.py::test_int_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_mu", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_R", 
"statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_R", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_r", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_null_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_R", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_params", 
"statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_offset_exposure", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_missing", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_get_distribution", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_predict", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary2", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llnull", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_init_kwds", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_t_test", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_prob", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_dummy_overall", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llf", 
"statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llf", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroMNLogit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_dof", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pvalues", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pred_table", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_summary_latex", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxoverall", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_mean", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_k", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_loglikeobs", 
"statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_t", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_wald", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_df", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llnull", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pvalues", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoissonNull::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_tests", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_jac", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestSweepAlphaL1::test_sweep_alpha", "statsmodels/discrete/tests/test_discrete.py::test_issue_339", 
"statsmodels/discrete/tests/test_discrete.py::test_issue_341", "statsmodels/discrete/tests/test_discrete.py::test_non_binary", "statsmodels/discrete/tests/test_discrete.py::test_poisson_predict", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor_categorical", "statsmodels/discrete/tests/test_discrete.py::test_binary_pred_table_zeros", "statsmodels/discrete/tests/test_discrete.py::test_iscount", "statsmodels/discrete/tests/test_discrete.py::test_formula_missing_exposure", "statsmodels/discrete/tests/test_discrete.py::test_perfect_prediction", "statsmodels/discrete/tests/test_discrete.py::test_predict_with_exposure", "statsmodels/discrete/tests/test_discrete.py::test_poisson_newton", "statsmodels/discrete/tests/test_discrete.py::test_isdummy", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_float_name", "statsmodels/discrete/tests/test_discrete.py::test_unchanging_degrees_of_freedom", "statsmodels/discrete/tests/test_discrete.py::test_null_options", "statsmodels/discrete/tests/test_discrete.py::test_cov_confint_pandas", "statsmodels/discrete/tests/test_discrete.py::test_t_test", "statsmodels/discrete/tests/test_discrete.py::test_optim_kwds_prelim", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_conf_int", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_tests", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_converged", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_mean", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_aic", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_k", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_aic", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmean", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_diagnostic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bse", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pvalues", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p1", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p2", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_basic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_predict_prob", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_mean_var", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_newton", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_df", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_fit_regularized", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_init_kwds", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_t", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_score", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_hessian", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bse", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_fit_regularized_invalid_method", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNull::test_llnull", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_alternatives", "statsmodels/regression/tests/test_regression.py::TestOLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestOLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC2_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared_adj_overfit", "statsmodels/regression/tests/test_regression.py::TestOLS::test_missing", "statsmodels/regression/tests/test_regression.py::TestOLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC0_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resid_zero_variance", "statsmodels/regression/tests/test_regression.py::TestOLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestOLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_qr_normalized_cov_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC3_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestOLS::test_aic", 
"statsmodels/regression/tests/test_regression.py::TestOLS::test_summary_slim", "statsmodels/regression/tests/test_regression.py::TestOLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestOLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestOLS::test_HC1_errors", "statsmodels/regression/tests/test_regression.py::TestOLS::test_params", "statsmodels/regression/tests/test_regression.py::TestOLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestOLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestOLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestOLS::test_eigenvalues", "statsmodels/regression/tests/test_regression.py::TestOLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestOLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_ess", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_mse_resid", 
"statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_params", "statsmodels/regression/tests/test_regression.py::TestNxNxOne::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_params_none", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_pvalues", 
"statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_large_equal_params", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_large_data::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_resids", 
"statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_params", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLSExogWeights::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_params", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_loglike", 
"statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLS_OLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_params", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_mse_resid", 
"statsmodels/regression/tests/test_regression.py::TestWLS_GLS::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestYuleWalker::test_params", "statsmodels/regression/tests/test_regression.py::TestFtest::test_F", "statsmodels/regression/tests/test_regression.py::TestFtest::test_p", "statsmodels/regression/tests/test_regression.py::TestFtest::test_Df_denom", "statsmodels/regression/tests/test_regression.py::TestFtest::test_Df_num", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_ss", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_ll", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_WLS_equivalence::test_params", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_sd", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_effect", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_tvalue", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestTtest2::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestTtest::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestTtest::test_effect", "statsmodels/regression/tests/test_regression.py::TestTtest::test_new_tvalue", "statsmodels/regression/tests/test_regression.py::TestTtest::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestTtest::test_sd", "statsmodels/regression/tests/test_regression.py::TestTtest::test_tvalue", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_bic", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_loglike", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_params", 
"statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_aic", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_scale", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_wresid", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_degrees", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_ess", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestWLSScalarVsArray::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_params", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_degrees", 
"statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_aic", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_ess", "statsmodels/regression/tests/test_regression.py::TestNxOneNx::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_bic", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_resids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_wresid", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_ess", 
"statsmodels/regression/tests/test_regression.py::TestNxNx::test_loglike", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_degrees", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_params", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_scale", "statsmodels/regression/tests/test_regression.py::TestNxNx::test_aic", "statsmodels/regression/tests/test_regression.py::TestNonFit::test_df_resid", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_scale", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_bic", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_resids", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_params", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_degrees", 
"statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_ess", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_loglike", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_wresid", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_aic", "statsmodels/regression/tests/test_regression.py::TestDataDimensions::test_standarderrors", "statsmodels/regression/tests/test_regression.py::test_wls_example", "statsmodels/regression/tests/test_regression.py::test_wls_tss", "statsmodels/regression/tests/test_regression.py::test_wls_missing", "statsmodels/regression/tests/test_regression.py::test_const_indicator", "statsmodels/regression/tests/test_regression.py::test_fvalue_const_only", "statsmodels/regression/tests/test_regression.py::test_summary_as_latex", "statsmodels/regression/tests/test_regression.py::test_bad_size", "statsmodels/regression/tests/test_regression.py::test_conf_int_single_regressor", "statsmodels/regression/tests/test_regression.py::test_missing_formula_predict", "statsmodels/regression/tests/test_regression.py::test_ridge", "statsmodels/regression/tests/test_regression.py::test_formula_missing_cat", "statsmodels/regression/tests/test_regression.py::test_regularized_refit", "statsmodels/regression/tests/test_regression.py::test_burg_errors", "statsmodels/regression/tests/test_regression.py::test_regularized_options", "statsmodels/regression/tests/test_regression.py::test_summary_no_constant", "statsmodels/regression/tests/test_regression.py::test_ols_constant", 
"statsmodels/regression/tests/test_regression.py::test_fvalue_only_constant", "statsmodels/regression/tests/test_regression.py::test_fvalue_implicit_constant", "statsmodels/regression/tests/test_regression.py::test_burg", "statsmodels/regression/tests/test_regression.py::test_bool_regressor", "statsmodels/regression/tests/test_regression.py::TestGLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS::test_resid", "statsmodels/regression/tests/test_regression.py::TestGLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_fittedvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_missing", "statsmodels/regression/tests/test_regression.py::TestGLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS::test_tvalues", "statsmodels/regression/tests/test_regression.py::TestGLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_nonnested", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_LRversion", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_homoskedastic", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_nodemean", "statsmodels/regression/tests/test_regression.py::TestLM::test_LM_heteroskedastic_demean", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_params", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_ss", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_ll", "statsmodels/regression/tests/test_regression.py::TestOLS_GLS_WLS_equivalence::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_fvalue", 
"statsmodels/regression/tests/test_regression.py::TestFTest2::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_df_num", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestFTest2::test_new_ftest", "statsmodels/regression/tests/test_regression.py::TestWLS_CornerCases::test_wrong_size_weights", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_rsquared_adj", 
"statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_OLS::test_aic", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_df_denom", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_df_num", "statsmodels/regression/tests/test_regression.py::TestFtestQ::test_pvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_resids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_scale", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_bic", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wrong_size_sigma_2d", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_params", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_degrees", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wresid", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_aic", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_ess", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_norm_resids", 
"statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_loglike", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestGLS_alt_sigma::test_wrong_size_sigma_1d", "statsmodels/regression/tests/test_regression.py::TestRTO::test_wresid", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_total", "statsmodels/regression/tests/test_regression.py::TestRTO::test_pvalues", "statsmodels/regression/tests/test_regression.py::TestRTO::test_resids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_rsquared_adj", "statsmodels/regression/tests/test_regression.py::TestRTO::test_degrees", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_resid", "statsmodels/regression/tests/test_regression.py::TestRTO::test_standarderrors", "statsmodels/regression/tests/test_regression.py::TestRTO::test_fvalue", "statsmodels/regression/tests/test_regression.py::TestRTO::test_bic", "statsmodels/regression/tests/test_regression.py::TestRTO::test_mse_model", "statsmodels/regression/tests/test_regression.py::TestRTO::test_confidenceintervals", "statsmodels/regression/tests/test_regression.py::TestRTO::test_conf_int_subset", "statsmodels/regression/tests/test_regression.py::TestRTO::test_loglike", "statsmodels/regression/tests/test_regression.py::TestRTO::test_norm_resids", "statsmodels/regression/tests/test_regression.py::TestRTO::test_aic", "statsmodels/regression/tests/test_regression.py::TestRTO::test_scale", "statsmodels/regression/tests/test_regression.py::TestRTO::test_ess", "statsmodels/regression/tests/test_regression.py::TestRTO::test_rsquared", "statsmodels/regression/tests/test_regression.py::TestRTO::test_params", 
"statsmodels/regression/tests/test_regression.py::TestRTO::test_sumof_squaredresids", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized_weights", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_empty_model", "statsmodels/regression/tests/test_regression.py::TestRegularizedFit::test_regularized_weights_list", "statsmodels/regression/tests/test_predict.py::TestWLSPrediction::test_ci", "statsmodels/regression/tests/test_predict.py::TestWLSPrediction::test_glm", "statsmodels/regression/tests/test_predict.py::test_predict_se", "statsmodels/regression/tests/test_predict.py::test_predict_remove_data", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoissonFormula::test_wald", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoissonFormula::test_predict", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoissonFormula::test_edf", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoissonFormula::test_params", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoissonFormula::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoissonFormula::test_null_smoke", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoissonFormula::test_select_alpha", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoissonFormula::test_names_wrapper", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoissonFormula::test_smooth", "statsmodels/gam/tests/test_penalized.py::TestTheilPLS5::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestTheilPLS5::test_cov_robust", "statsmodels/gam/tests/test_penalized.py::TestTheilPLS5::test_params", "statsmodels/gam/tests/test_penalized.py::TestGAM6Pirls::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGAM5Bfgs::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGAM5Bfgs::test_predict", 
"statsmodels/gam/tests/test_penalized.py::TestGAM5Bfgs::test_null_smoke", "statsmodels/gam/tests/test_penalized.py::TestGAM5Bfgs::test_params", "statsmodels/gam/tests/test_penalized.py::TestGAM5Pirls::test_params", "statsmodels/gam/tests/test_penalized.py::TestGAM5Pirls::test_null_smoke", "statsmodels/gam/tests/test_penalized.py::TestGAM5Pirls::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGAM6ExogBfgs::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGAM6ExogBfgs::test_exog", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBS::test_params", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBS::test_null_smoke", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBS::test_edf", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBS::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBS::test_predict", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBS::test_crossval", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBS::test_smooth", "statsmodels/gam/tests/test_penalized.py::TestGAM6Bfgs::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGAMMPG::test_exog", "statsmodels/gam/tests/test_penalized.py::TestGAMMPG::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGLMPenalizedPLS5::test_null_smoke", "statsmodels/gam/tests/test_penalized.py::TestGLMPenalizedPLS5::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGLMPenalizedPLS5::test_params", "statsmodels/gam/tests/test_penalized.py::TestGAM6Bfgs0::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGAM6ExogPirls::test_fitted", "statsmodels/gam/tests/test_penalized.py::TestGAM6ExogPirls::test_exog", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoisson::test_params", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoisson::test_null_smoke", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoisson::test_smooth", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoisson::test_select_alpha", 
"statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoisson::test_predict", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoisson::test_wald", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoisson::test_edf", "statsmodels/gam/tests/test_penalized.py::TestGAMMPGBSPoisson::test_fitted", "statsmodels/genmod/tests/test_gee.py::TestGEEPoissonCovType::test_wrapper", "statsmodels/genmod/tests/test_gee.py::TestGEEPoissonCovType::test_cov_type", "statsmodels/genmod/tests/test_gee.py::TestGEEOrdinalCovType::test_wrapper", "statsmodels/genmod/tests/test_gee.py::TestGEEOrdinalCovType::test_cov_type", "statsmodels/genmod/tests/test_gee.py::TestGEEPoissonFormulaCovType::test_cov_type", "statsmodels/genmod/tests/test_gee.py::test_ql_known[Gaussian]", "statsmodels/genmod/tests/test_gee.py::test_ql_diff[Gaussian]", "statsmodels/genmod/tests/test_gee.py::test_ql_diff[Binomial]", "statsmodels/genmod/tests/test_gee.py::test_ar_covsolve", "statsmodels/genmod/tests/test_gee.py::test_unstructured_incomplete", "statsmodels/genmod/tests/test_gee.py::test_regularized_gaussian", "statsmodels/genmod/tests/test_gee.py::test_ql_known[Poisson]", "statsmodels/genmod/tests/test_gee.py::test_ql_diff[Poisson]", "statsmodels/genmod/tests/test_gee.py::test_stationary_covsolve", "statsmodels/genmod/tests/test_gee.py::test_regularized_poisson", "statsmodels/genmod/tests/test_gee.py::test_quasipoisson[True]", "statsmodels/genmod/tests/test_gee.py::test_ex_covsolve", "statsmodels/genmod/tests/test_gee.py::test_plots", "statsmodels/genmod/tests/test_gee.py::test_missing", "statsmodels/genmod/tests/test_gee.py::test_quasipoisson[False]", "statsmodels/genmod/tests/test_gee.py::test_unstructured_complete", "statsmodels/genmod/tests/test_gee.py::test_qic_warnings", "statsmodels/genmod/tests/test_gee.py::test_grid_ar", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_missing", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_constraint_covtype", 
"statsmodels/genmod/tests/test_gee.py::TestGEE::test_invalid_args[True-False]", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_compare_score_test_warnings", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_equivalence_from_pairs", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_invalid_args[True-True]", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_weighted", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_ordinal", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_sensitivity", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_offset_formula", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_nominal_independence", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_nested_linear", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_nominal", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_formulas", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_compare_score_test[Exchangeable]", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_multinomial", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_ordinal_formula", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_ordinal_independence", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_logistic", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_post_estimation", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_compare_score_test[Independence]", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_default_time", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_nested_pandas", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_ordinal_plot", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_margins_multinomial", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_stationary_nogrid", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_poisson_epil", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_compare_OLS", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_predict", 
"statsmodels/genmod/tests/test_gee.py::TestGEE::test_missing_formula", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_invalid_args[False-True]", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_autoregressive", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_scoretest", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_linear_constrained", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_poisson", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_predict_exposure", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_compare_poisson", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_nominal_plot", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_compare_logit", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_groups", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_margins_logistic", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_stationary_grid", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_margins_poisson", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_linear", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_equivalence", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_invalid_args[False-False]", "statsmodels/genmod/tests/test_gee.py::TestGEE::test_margins_gaussian", "statsmodels/genmod/tests/test_gee.py::TestGEEMultinomialCovType::test_cov_type", "statsmodels/genmod/tests/test_gee.py::TestGEEMultinomialCovType::test_wrapper" ]
[ "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_init_kwargs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_init_kwargs", "statsmodels/regression/tests/test_regression.py::test_regularized_predict" ]
Python
[]
[]
statsmodels/statsmodels
7,757
statsmodels__statsmodels-7757
[ "7353" ]
e741f3b22302199121090822353f20d794a02148
diff --git a/statsmodels/miscmodels/ordinal_model.py b/statsmodels/miscmodels/ordinal_model.py --- a/statsmodels/miscmodels/ordinal_model.py +++ b/statsmodels/miscmodels/ordinal_model.py @@ -6,6 +6,8 @@ License: BSD-3 """ +import warnings + from statsmodels.compat.pandas import Appender import numpy as np @@ -135,6 +137,10 @@ def __init__(self, endog, exog, offset=None, distr='probit', **kwds): unique, index = np.unique(self.endog, return_inverse=True) self.endog = index labels = unique + if np.isnan(labels).any(): + msg = ("NaN in dependent variable detected. " + "Missing values need to be removed.") + raise ValueError(msg) elif self.endog.ndim == 2: if not hasattr(self, "design_info"): raise ValueError("2-dim endog not supported") @@ -146,7 +152,7 @@ def __init__(self, endog, exog, offset=None, distr='probit', **kwds): # self.endog = self.endog.argmax(1) if self.k_constant > 0: - raise ValueError("there should not be a constant in the model") + raise ValueError("There should not be a constant in the model") self._initialize_labels(labels, k_levels=k_levels) @@ -187,7 +193,6 @@ def _check_inputs(self, endog, exog): """ if not isinstance(self.distr, stats.rv_continuous): - import warnings msg = ( f"{self.distr.name} is not a scipy.stats distribution." )
diff --git a/statsmodels/genmod/tests/test_glm.py b/statsmodels/genmod/tests/test_glm.py --- a/statsmodels/genmod/tests/test_glm.py +++ b/statsmodels/genmod/tests/test_glm.py @@ -1176,16 +1176,16 @@ def check_score_hessian(results): # avoid checking score at MLE, score close to zero sc = results.model.score(params * 0.98, scale=1) # cs currently (0.9) does not work for all families - llfunc = lambda x: results.model.loglike(x, scale=1) + llfunc = lambda x: results.model.loglike(x, scale=1) # noqa sc2 = approx_fprime(params * 0.98, llfunc) - assert_allclose(sc, sc2, rtol=0.05) + assert_allclose(sc, sc2, rtol=1e-4, atol=1e-4) hess = results.model.hessian(params, scale=1) hess2 = approx_hess(params, llfunc) - assert_allclose(hess, hess2, rtol=0.05) - scfunc = lambda x: results.model.score(x, scale=1) + assert_allclose(hess, hess2, rtol=1e-4) + scfunc = lambda x: results.model.score(x, scale=1) # noqa hess3 = approx_fprime(params, scfunc) - assert_allclose(hess, hess3, rtol=0.05) + assert_allclose(hess, hess3, rtol=1e-4) def test_gradient_irls(): diff --git a/statsmodels/miscmodels/tests/test_ordinal_model.py b/statsmodels/miscmodels/tests/test_ordinal_model.py --- a/statsmodels/miscmodels/tests/test_ordinal_model.py +++ b/statsmodels/miscmodels/tests/test_ordinal_model.py @@ -283,7 +283,8 @@ def test_formula_categorical(self): assert hasattr(modf2.data, "frame") assert not hasattr(modf2, "frame") - with pytest.raises(ValueError): + msg = "Only ordered pandas Categorical" + with pytest.raises(ValueError, match=msg): # only ordered categorical or numerical endog are allowed # string endog raises ValueError OrderedModel.from_formula( @@ -375,7 +376,7 @@ def test_setup(self): # test over parameterized model with implicit constant formula = "apply ~ 0 + pared + public + gpa + C(dummy)" - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="not be a constant"): OrderedModel.from_formula(formula, data, distr='logit') # ignore constant, so we get results 
without exception @@ -482,3 +483,27 @@ def test_attributes(self): for attr in attributes: assert_allclose(getattr(resp, attr), getattr(res_logit, attr), rtol=1e-4) + + +def test_nan_endog_exceptions(): + nobs = 15 + y = np.repeat(np.arange(3), nobs // 3) + x = np.column_stack((np.ones(nobs), np.arange(nobs))) + with pytest.raises(ValueError, match="not be a constant"): + OrderedModel(y, x, distr='logit') + + y_nan = y.astype(float) + y_nan[0] = np.nan + with pytest.raises(ValueError, match="NaN in dependent variable"): + OrderedModel(y_nan, x[:, 1:], distr='logit') + + if hasattr(pd, "CategoricalDtype"): + df = pd.DataFrame({ + "endog": pd.Series( + y, dtype=pd.CategoricalDtype([1, 2, 3], ordered=True)), + "exog": x[:, 1] + }) + + msg = "missing values in categorical endog" + with pytest.raises(ValueError, match=msg): + OrderedModel(df["endog"], df[["exog"]])
missing endog values in OrderedModel #### Describe the bug OrderedModel produces one threshold per occurrence of nan in endog. If not a pandas Categorical, Labels in OrderedModel are computed using `np.unique` [here](https://github.com/statsmodels/statsmodels/blob/a91eea01799b2fa1bcafe0dea0f00076091d9f5e/statsmodels/miscmodels/ordinal_model.py#L130). However `np.unique` does not squash nans: ```python In [2]: np.unique([np.nan, np.nan, np.nan, 1, 2, 1, 2]) Out[2]: array([1, 2, nan, nan, nan]) ``` I'm not sure if this is a bug in `numpy.unique()` or the intended behavior there, but the result is, instead of either 1 or 0 output values for nan (I'm not sure which would make the most sense, probably none?), a label and threshold is produced for *each* nan: <img width="387" alt="Screen Shot 2021-02-27 at 14 37 20" src="https://user-images.githubusercontent.com/151929/109388759-56088e80-7909-11eb-953c-5b28a6c70aa7.png"> Since the same input cast to a pandas categorical produces a ValueError: > ValueError: missing values in categorical endog are not supported Presumably the same error should be raised in this case as well. #### Code Sample, a copy-pastable example if possible ```python import numpy as np import pandas as pd import scipy.stats as stats from statsmodels.miscmodels.ordinal_model import OrderedModel N = 100 df = pd.DataFrame( { "target": np.random.randint(1, 3, N).astype(float), "factor1": np.random.random(N), "factor2": np.random.random(N), } ) df.loc[1:4, "target"] = float("nan") mod_prob = OrderedModel(df["target"], df[["factor1", "factor2"]], distr="probit") res_prob = mod_prob.fit(method="bfgs") res_prob.summary() ``` which produces the output above, with one output per missing value. Casting the same column to a categorical results in a sensible error: ```python df["target"] = df["target"].astype(pd.CategoricalDtype(categories=[1, 2, 3], ordered=True)) ... 
ValueError: missing values in categorical endog are not supported ``` Since this was a pandas dataframe, calling `dropna(subset=["target"])` prior to passing to the model worked just fine, and produced the desired result. <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `master`. If your problem has been fixed in an unreleased version, you might be able to use `master` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the master branch of this repository? It helps the limited resources if we know problems exist in the current master so that they do not need to check whether the code sample produces a bug in the next release. </details> If the issue has not been resolved, please file it in the issue tracker. 
#### Expected Output Either of these would make sense to me: - early error that nans won't work (simplest and seems to match an existing error check in categorical endog handling) - handle and drop missing values using `np.isnan` (unclear if this would break things to not have a label for an output) #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> [paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line] ``` INSTALLED VERSIONS ------------------ Python: 3.8.6.final.0 OS: Darwin 20.3.0 Darwin Kernel Version 20.3.0: Thu Jan 21 00:07:06 PST 2021; root:xnu-7195.81.3~1/RELEASE_X86_64 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: v0.13.0.dev0+220.ga91eea017 (src/statsmodels/statsmodels) Required Dependencies ===================== cython: 3.0a6 (/Users/minrk/conda/lib/python3.8/site-packages/Cython) numpy: 1.19.1 (/Users/minrk/conda/lib/python3.8/site-packages/numpy) scipy: 1.5.2 (/Users/minrk/conda/lib/python3.8/site-packages/scipy) pandas: 1.1.2 (/Users/minrk/conda/lib/python3.8/site-packages/pandas) dateutil: 2.8.1 (/Users/minrk/conda/lib/python3.8/site-packages/dateutil) patsy: 0.5.1 (/Users/minrk/conda/lib/python3.8/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.3.2 (/Users/minrk/conda/lib/python3.8/site-packages/matplotlib) backend: MacOSX cvxopt: Not installed joblib: Not installed Developer Tools ================ IPython: 8.0.0.dev (/Users/minrk/dev/ip/ipython/IPython) jinja2: 2.11.2 (/Users/minrk/conda/lib/python3.8/site-packages/jinja2) sphinx: 3.2.1 (/Users/minrk/conda/lib/python3.8/site-packages/sphinx) pygments: 2.7.1 (/Users/minrk/conda/lib/python3.8/site-packages/pygments) pytest: 6.1.1 (/Users/minrk/conda/lib/python3.8/site-packages/pytest) virtualenv: 20.0.33 (/Users/minrk/conda/lib/python3.8/site-packages/virtualenv) ``` </details>
"2021-09-26T22:51:14Z"
0.13dev
[ "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary2", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestRegularized::test_regularized", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_residuals", 
"statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_r", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_summary", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_scale", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_bic", 
"statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_bse", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_init_kwargs", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_params", "statsmodels/genmod/tests/test_glm.py::test_perfect_pred", "statsmodels/genmod/tests/test_glm.py::test_score_test_ols", "statsmodels/genmod/tests/test_glm.py::test_attribute_writable_resettable", "statsmodels/genmod/tests/test_glm.py::test_summary", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls", "statsmodels/genmod/tests/test_glm.py::test_glm_start_params", "statsmodels/genmod/tests/test_glm.py::test_formula_missing_exposure", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls_eim", "statsmodels/genmod/tests/test_glm.py::test_loglike_no_opt", "statsmodels/genmod/tests/test_glm.py::test_plots", "statsmodels/genmod/tests/test_glm.py::test_glm_irls_method", "statsmodels/genmod/tests/test_glm.py::test_wtd_patsy_missing", "statsmodels/genmod/tests/test_glm.py::test_tweedie_elastic_net", "statsmodels/genmod/tests/test_glm.py::test_glm_lasso_6431", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_poisson_limit", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL", "statsmodels/genmod/tests/test_glm.py::testTweediePowerEstimate", 
"statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_upper_limit", "statsmodels/genmod/tests/test_glm.py::test_glm_bic", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int16]", "statsmodels/genmod/tests/test_glm.py::test_int_scale", "statsmodels/genmod/tests/test_glm.py::test_output_exposure_null", "statsmodels/genmod/tests/test_glm.py::test_non_invertible_hessian_fails_summary", "statsmodels/genmod/tests/test_glm.py::test_poisson_deviance", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int64]", "statsmodels/genmod/tests/test_glm.py::test_tweedie_score", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int32]", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int8]", "statsmodels/genmod/tests/test_glm.py::test_qaic", "statsmodels/genmod/tests/test_glm.py::test_glm_bic_warning", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_mu", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary2", 
"statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_missing", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_offset_exposure", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_predict", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_scale", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_bic", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_tpvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_null_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_scale", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_scale", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_endog_dtype", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_residuals", 
"statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution_binom_count", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog_formula", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_residuals", 
"statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_resid", 
"statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_scale", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_bic", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_degrees", 
"statsmodels/genmod/tests/test_glm.py::TestStartParams::test_params", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_missing", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_aic", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only", 
"statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_resid", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_summary", 
"statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_df", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestLogitBinary::test_attributes", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestLogitModel::test_basic", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestLogitModel::test_postestimation", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestLogitModel::test_unordered", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestLogitModel::test_formula", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestLogitModel::test_results_other", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestLogitModel::test_pandas", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestProbitModel::test_results_other", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestProbitModel::test_formula_categorical", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestProbitModel::test_unordered", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestProbitModel::test_loglikerelated", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestProbitModel::test_basic", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestProbitModel::test_formula", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestProbitModel::test_pandas", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestProbitModel::test_offset", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestLogitModelFormula::test_setup", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestCLogLogModel::test_basic", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestCLogLogModel::test_pandas", 
"statsmodels/miscmodels/tests/test_ordinal_model.py::TestCLogLogModel::test_unordered", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestCLogLogModel::test_results_other", "statsmodels/miscmodels/tests/test_ordinal_model.py::TestCLogLogModel::test_formula" ]
[ "statsmodels/miscmodels/tests/test_ordinal_model.py::test_nan_endog_exceptions" ]
Python
[ "https://user-images.githubusercontent.com/151929/109388759-56088e80-7909-11eb-953c-5b28a6c70aa7.png" ]
[]
statsmodels/statsmodels
7,772
statsmodels__statsmodels-7772
[ "7771" ]
b8b86097f8e20fb0d132a2ae84fa9a3e0a45e499
diff --git a/statsmodels/stats/meta_analysis.py b/statsmodels/stats/meta_analysis.py --- a/statsmodels/stats/meta_analysis.py +++ b/statsmodels/stats/meta_analysis.py @@ -276,13 +276,31 @@ def summary_frame(self, alpha=0.05, use_t=None): results = pd.DataFrame(res, index=labels, columns=col_names) return results - def plot_forest(self, ax=None, **kwds): + def plot_forest(self, alpha=0.05, use_t=None, use_exp=False, + ax=None, **kwds): """Forest plot with means and confidence intervals Parameters ---------- ax : None or matplotlib axis instance If ax is provided, then the plot will be added to it. + alpha : float in (0, 1) + Significance level for confidence interval. Nominal coverage is + ``1 - alpha``. + use_t : None or bool + If use_t is None, then the attribute `use_t` determines whether + normal or t-distribution is used for confidence intervals. + Specifying use_t overrides the attribute. + If use_t is false, then confidence intervals are based on the + normal distribution. If it is true, then the t-distribution is + used. + use_exp : bool + If `use_exp` is True, then the effect size and confidence limits + will be exponentiated. This transform log-odds-ration into + odds-ratio, and similarly for risk-ratio. + ax : AxesSubplot, optional + If given, this axes is used to plot in instead of a new figure + being created. kwds : optional keyword arguments Keywords are forwarded to the dot_plot function that creates the plot. @@ -297,7 +315,9 @@ def plot_forest(self, ax=None, **kwds): """ from statsmodels.graphics.dotplots import dot_plot - res_df = self.summary_frame() + res_df = self.summary_frame(alpha=alpha, use_t=use_t) + if use_exp: + res_df = np.exp(res_df[["eff", "ci_low", "ci_upp"]]) hw = np.abs(res_df[["ci_low", "ci_upp"]] - res_df[["eff"]].values) fig = dot_plot(points=res_df["eff"], intervals=hw, lines=res_df.index, line_order=res_df.index, **kwds)
diff --git a/statsmodels/stats/tests/test_meta.py b/statsmodels/stats/tests/test_meta.py --- a/statsmodels/stats/tests/test_meta.py +++ b/statsmodels/stats/tests/test_meta.py @@ -11,6 +11,7 @@ import numpy as np import pandas as pd +import pytest from numpy.testing import assert_equal, assert_allclose @@ -353,3 +354,14 @@ def test_basic(self): var_weights=weights) res_glm = mod_glm.fit() assert_allclose(res_glm.params, res2.TE_random, rtol=1e-13) + + @pytest.mark.matplotlib + def test_plot(self): + # smoke tests + res1 = self.res1 + # `use_t=False` avoids warning about missing nobs for use_t is true + res1.plot_forest(use_t=False) + res1.plot_forest(use_exp=True, use_t=False) + res1.plot_forest(alpha=0.01, use_t=False) + with pytest.raises(TypeError, match="unexpected keyword"): + res1.plot_forest(junk=5, use_t=False)
ENH: allow to plot OR or RR instead of logOR and logRR with `plot_forest` With `statsmodels.stats.meta_analysis` `effectsize_2proportions`, we get the log odds ratio or log risk ratio when use the corresponding statistic option, and we can get the OR and RR, as well as their corresponding CI, by using `exp` `np.exp(res_eyding.summary_frame())` But we cannot plot the OR or RR with `plot_forest`.
good idea Just changing the plot would only be one line (or 3 total) (I just saw that we don't specify a plot title. That seems to be currently up to the user. AFAICS, the plot doesn't specify which effect size is used.) What would be a good name for an option in `plot_forest`? `use_exp=False` anti-log or reverse log would be descriptive names Stats uses `eform` to convert log-or and similar #2235 my "eform" function only uses generic names like `transform` another missing: the plot_forest doesn't have `alpha=0.05, use_t=None` to forward options to summary_frame method `use_exp=False` makes a lot of sense for the plot title, xlabels etc, it's no big deal we can always use `plt.gca().set_title('blabla')` question while I'm checking the unit tests: is `res.use_t` True for the combine results with log-or and similar proportion effect sizes? I get a warning in the unit test, that `use_t=True` needs nobs
"2021-10-01T15:00:29Z"
0.13dev
[ "statsmodels/stats/tests/test_meta.py::TestEffSmdMeta::test_smd", "statsmodels/stats/tests/test_meta.py::TestMetaK1::test_tau_kacker", "statsmodels/stats/tests/test_meta.py::TestMetaK1::test_pm", "statsmodels/stats/tests/test_meta.py::TestMetaK1::test_dl", "statsmodels/stats/tests/test_meta.py::TestEffectsizeBinom::test_effectsize", "statsmodels/stats/tests/test_meta.py::TestMetaBinOR::test_basic" ]
[ "statsmodels/stats/tests/test_meta.py::TestMetaBinOR::test_plot" ]
Python
[]
[]
statsmodels/statsmodels
7,791
statsmodels__statsmodels-7791
[ "7790" ]
a5ec3cb3e1114e7b10540683b621fdc402382e85
diff --git a/statsmodels/tsa/holtwinters/model.py b/statsmodels/tsa/holtwinters/model.py --- a/statsmodels/tsa/holtwinters/model.py +++ b/statsmodels/tsa/holtwinters/model.py @@ -496,7 +496,7 @@ def predict(self, params, start=None, end=None): if start is None: freq = getattr(self._index, "freq", 1) if isinstance(freq, int): - start = self._index.shape[0] + freq + start = self._index.shape[0] else: start = self._index[-1] + freq start, end, out_of_sample, _ = self._get_prediction_index(
diff --git a/statsmodels/tsa/holtwinters/tests/test_holtwinters.py b/statsmodels/tsa/holtwinters/tests/test_holtwinters.py --- a/statsmodels/tsa/holtwinters/tests/test_holtwinters.py +++ b/statsmodels/tsa/holtwinters/tests/test_holtwinters.py @@ -1025,6 +1025,38 @@ def test_damping_trend_zero(): assert_allclose(pred1, pred2, atol=1e-10) +def test_different_inputs(): + array_input_add = [10, 20, 30, 40, 50] + series_index_add = pd.date_range(start="2000-1-1", periods=len(array_input_add)) + series_input_add = pd.Series(array_input_add, series_index_add) + + array_input_mul = [2, 4, 8, 16, 32] + series_index_mul = pd.date_range(start="2000-1-1", periods=len(array_input_mul)) + series_input_mul = pd.Series(array_input_mul, series_index_mul) + + fit1 = ExponentialSmoothing(array_input_add, trend="add").fit() + fit2 = ExponentialSmoothing(series_input_add, trend="add").fit() + fit3 = ExponentialSmoothing(array_input_mul, trend="mul").fit() + fit4 = ExponentialSmoothing(series_input_mul, trend="mul").fit() + + assert_almost_equal(fit1.predict(), + [60], 1) + assert_almost_equal(fit1.predict(start=5, end=7), + [60, 70, 80], 1) + assert_almost_equal(fit2.predict(), + [60], 1) + assert_almost_equal(fit2.predict(start="2000-1-6", end="2000-1-8"), + [60, 70, 80], 1) + assert_almost_equal(fit3.predict(), + [64], 1) + assert_almost_equal(fit3.predict(start=5, end=7), + [64, 128, 256], 1) + assert_almost_equal(fit4.predict(), + [64], 1) + assert_almost_equal(fit4.predict(start="2000-1-6", end="2000-1-8"), + [64, 128, 256], 1) + + @pytest.fixture def austourists(): # austourists dataset from fpp2 package
BUG: incorrect HW predictions Holt's model makes incorrect predictions for some types of inputs ```python import pandas as pd from statsmodels.tsa.holtwinters import Holt # same inputs with two different types of freq (int and datetime) array_input = [10, 20, 30, 40, 50] pd_series_input = pd.Series([10, 20, 30, 40, 50], pd.date_range(start='1/1/2018', periods=5)) model_array = Holt(array_input).fit() model_series = Holt(pd_series_input).fit() print(model_array.predict()) # this model return 70 print(model_series.predict()) # this model return 60 ``` Expected behavior that both models should return the same result. I figured out the problem and fixed it in my [PR #7791](https://github.com/statsmodels/statsmodels/pull/7791). statsmodels version: 0.13.0
"2021-10-11T12:04:06Z"
0.13dev
[ "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt_damp_r", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_predict", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_ndarray", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt_damp_fit", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal_add_mul", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal_buggy", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_simple_exp_smoothing", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[period]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_infer_freq", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_2d_data", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_basin_hopping", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[100]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[1000]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[bootstrap-10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fix_unfixable", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_attributes", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fix_set_parameters", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-add-None]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_summary_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[L-BFGS-B]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[SLSQP]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_keywords", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[range]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[2000]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_debiased", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_infeasible_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[trust-constr]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[None-10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_dampen", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fixed_errors", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-legacy-heuristic]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[None-1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_initialization", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_boxcox_components", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_seasonal[add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[Powell]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-estimated]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[basinhopping]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[least_squares]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[date_range]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_bad_bounds", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_index", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_no_params_to_optimize", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[bootstrap-1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_direct_holt_add", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_minimizer_kwargs_error", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[TNC]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_damping_trend_zero", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[irregular]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-None]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_seasonal[mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_integer_array", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fixed_basic", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_start_param_length", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_valid_bounds" ]
[ "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_different_inputs" ]
Python
[]
[]
statsmodels/statsmodels
7,795
statsmodels__statsmodels-7795
[ "7794" ]
a5ec3cb3e1114e7b10540683b621fdc402382e85
diff --git a/statsmodels/tsa/stattools.py b/statsmodels/tsa/stattools.py --- a/statsmodels/tsa/stattools.py +++ b/statsmodels/tsa/stattools.py @@ -5,9 +5,10 @@ from statsmodels.compat.numpy import lstsq from statsmodels.compat.pandas import deprecate_kwarg -from statsmodels.compat.python import lzip +from statsmodels.compat.python import lzip, Literal from statsmodels.compat.scipy import _next_regular +from typing import Tuple import warnings import numpy as np @@ -179,8 +180,9 @@ def adfuller( ---------- x : array_like, 1d The data series to test. - maxlag : int - Maximum lag which is included in test, default 12*(nobs/100)^{1/4}. + maxlag : {None, int} + Maximum lag which is included in test, default value of + 12*(nobs/100)^{1/4} is used when ``None``. regression : {"c","ct","ctt","n"} Constant and trend order to include in regression. @@ -1059,7 +1061,7 @@ def ccovf(x, y, adjusted=True, demean=True, fft=True): d = n method = "fft" if fft else "direct" - return correlate(xo, yo, "full", method=method)[n - 1:] / d + return correlate(xo, yo, "full", method=method)[n - 1 :] / d @deprecate_kwarg("unbiased", "adjusted") @@ -1867,7 +1869,12 @@ def has_missing(data): return np.isnan(np.sum(data)) -def kpss(x, regression="c", nlags="auto", store=False): +def kpss( + x, + regression: Literal["c", "ct"] = "c", + nlags: Literal["auto", "legacy"] | int = "auto", + store: bool = False, +) -> Tuple[float, float, int, dict[str, float]]: """ Kwiatkowski-Phillips-Schmidt-Shin test for stationarity. @@ -1883,12 +1890,11 @@ def kpss(x, regression="c", nlags="auto", store=False): * "c" : The data is stationary around a constant (default). * "ct" : The data is stationary around a trend. - nlags : {None, str, int}, optional - Indicates the number of lags to be used. If None (default), lags is - calculated using the legacy method. If "auto", lags is calculated - using the data-dependent method of Hobijn et al. (1998). See also - Andrews (1991), Newey & West (1994), and Schwert (1989). 
If set to - "legacy", uses int(12 * (n / 100)**(1 / 4)) , as outlined in + nlags : {str, int}, optional + Indicates the number of lags to be used. If "auto" (default), lags + is calculated using the data-dependent method of Hobijn et al. (1998). + See also Andrews (1991), Newey & West (1994), and Schwert (1989). If + set to "legacy", uses int(12 * (n / 100)**(1 / 4)) , as outlined in Schwert (1989). store : bool If True, then a result instance is returned additionally to @@ -1914,7 +1920,7 @@ def kpss(x, regression="c", nlags="auto", store=False): Notes ----- - To estimate sigma^2 the Newey-West estimator is used. If lags is None, + To estimate sigma^2 the Newey-West estimator is used. If lags is "legacy", the truncation lag parameter is set to int(12 * (n / 100) ** (1 / 4)), as outlined in Schwert (1989). The p-values are interpolated from Table 1 of Kwiatkowski et al. (1992). If the computed statistic is @@ -1968,19 +1974,26 @@ def kpss(x, regression="c", nlags="auto", store=False): if nlags == "legacy": nlags = int(np.ceil(12.0 * np.power(nobs / 100.0, 1 / 4.0))) nlags = min(nlags, nobs - 1) - elif nlags == "auto": + elif nlags == "auto" or nlags is None: + if nlags is None: + # TODO: Remove before 0.14 is released + warnings.warn( + "None is not a valid value for nlags. It must be an integer, " + "'auto' or 'legacy'. None will raise starting in 0.14", + FutureWarning, + ) # autolag method of Hobijn et al. 
(1998) nlags = _kpss_autolag(resids, nobs) nlags = min(nlags, nobs - 1) + elif isinstance(nlags, str): + raise ValueError("nvals must be 'auto' or 'legacy' when not an int") else: - nlags = int(nlags) + nlags = int_like(nlags, "nlags", optional=False) - if nlags >= nobs: - raise ValueError( - "lags ({}) must be < number of observations ({})".format( - nlags, nobs + if nlags >= nobs: + raise ValueError( + f"lags ({nlags}) must be < number of observations ({nobs})" ) - ) pvals = [0.10, 0.05, 0.025, 0.01] @@ -2053,6 +2066,7 @@ def _kpss_autolag(resids, nobs): autolags = int(gamma_hat * np.power(nobs, pwr)) return autolags + def range_unit_root_test(x, store=False): """ Range unit-root test for stationarity. @@ -2111,21 +2125,25 @@ def range_unit_root_test(x, store=False): # Table from [1] has been replicated using 200,000 samples # Critical values for new n_obs values have been identified pvals = [0.01, 0.025, 0.05, 0.10, 0.90, 0.95] - n = np.array([25, 50, 100, 150, 200, 250, 500, 1000, 2000, 3000, 4000, 5000]) - crit = np.array([ - [0.6626, 0.8126, 0.9192, 1.0712, 2.4863, 2.7312], - [0.7977, 0.9274, 1.0478, 1.1964, 2.6821, 2.9613], - [0.9070, 1.0243, 1.1412, 1.2888, 2.8317, 3.1393], - [0.9543, 1.0768, 1.1869, 1.3294, 2.8915, 3.2049], - [0.9833, 1.0984, 1.2101, 1.3494, 2.9308, 3.2482], - [0.9982, 1.1137, 1.2242, 1.3632, 2.9571, 3.2842], - [1.0494, 1.1643, 1.2712, 1.4076, 3.0207, 3.3584], - [1.0846, 1.1959, 1.2988, 1.4344, 3.0653, 3.4073], - [1.1121, 1.2200, 1.3230, 1.4556, 3.0948, 3.4439], - [1.1204, 1.2295, 1.3303, 1.4656, 3.1054, 3.4632], - [1.1309, 1.2347, 1.3378, 1.4693, 3.1165, 3.4717], - [1.1377, 1.2402, 1.3408, 1.4729, 3.1252, 3.4807] - ]) + n = np.array( + [25, 50, 100, 150, 200, 250, 500, 1000, 2000, 3000, 4000, 5000] + ) + crit = np.array( + [ + [0.6626, 0.8126, 0.9192, 1.0712, 2.4863, 2.7312], + [0.7977, 0.9274, 1.0478, 1.1964, 2.6821, 2.9613], + [0.9070, 1.0243, 1.1412, 1.2888, 2.8317, 3.1393], + [0.9543, 1.0768, 1.1869, 1.3294, 2.8915, 3.2049], + 
[0.9833, 1.0984, 1.2101, 1.3494, 2.9308, 3.2482], + [0.9982, 1.1137, 1.2242, 1.3632, 2.9571, 3.2842], + [1.0494, 1.1643, 1.2712, 1.4076, 3.0207, 3.3584], + [1.0846, 1.1959, 1.2988, 1.4344, 3.0653, 3.4073], + [1.1121, 1.2200, 1.3230, 1.4556, 3.0948, 3.4439], + [1.1204, 1.2295, 1.3303, 1.4656, 3.1054, 3.4632], + [1.1309, 1.2347, 1.3378, 1.4693, 3.1165, 3.4717], + [1.1377, 1.2402, 1.3408, 1.4729, 3.1252, 3.4807], + ] + ) # Interpolation for nobs inter_crit = np.zeros((1, crit.shape[1])) @@ -2141,9 +2159,9 @@ def range_unit_root_test(x, store=False): rur_stat = count / np.sqrt(len(x)) - k = len(pvals)-1 - for i in range(len(pvals)-1,-1,-1): - if rur_stat < inter_crit[0,i]: + k = len(pvals) - 1 + for i in range(len(pvals) - 1, -1, -1): + if rur_stat < inter_crit[0, i]: k = i else: break @@ -2156,14 +2174,21 @@ def range_unit_root_test(x, store=False): """ direction = "" if p_value == pvals[-1]: - direction="smaller" + direction = "smaller" elif p_value == pvals[0]: - direction="larger" + direction = "larger" if direction: - warnings.warn(warn_msg.format(direction=direction), InterpolationWarning) + warnings.warn( + warn_msg.format(direction=direction), InterpolationWarning + ) - crit_dict = {"10%": inter_crit[0,3], "5%": inter_crit[0,2], "2.5%": inter_crit[0,1], "1%": inter_crit[0,0]} + crit_dict = { + "10%": inter_crit[0, 3], + "5%": inter_crit[0, 2], + "2.5%": inter_crit[0, 1], + "1%": inter_crit[0, 0], + } if store: from statsmodels.stats.diagnostic import ResultsStore
diff --git a/statsmodels/tsa/tests/test_stattools.py b/statsmodels/tsa/tests/test_stattools.py --- a/statsmodels/tsa/tests/test_stattools.py +++ b/statsmodels/tsa/tests/test_stattools.py @@ -17,6 +17,7 @@ import pandas as pd from pandas import DataFrame, Series, date_range import pytest +from scipy.interpolate import interp1d from statsmodels.datasets import macrodata, modechoice, nile, randhie, sunspots from statsmodels.tools.sm_exceptions import ( @@ -25,15 +26,17 @@ InterpolationWarning, MissingDataError, ) +# Remove imports when range unit root test gets an R implementation +from statsmodels.tools.validation import array_like, bool_like from statsmodels.tsa.arima_process import arma_acovf from statsmodels.tsa.statespace.sarimax import SARIMAX from statsmodels.tsa.stattools import ( acf, acovf, - ccovf, adfuller, arma_order_select_ic, breakvar_heteroskedasticity_test, + ccovf, coint, grangercausalitytests, innovations_algo, @@ -49,10 +52,6 @@ zivot_andrews, ) -# Remove imports when range unit root test gets an R implementation -from statsmodels.tools.validation import array_like, bool_like -from scipy.interpolate import interp1d - DECIMAL_8 = 8 DECIMAL_6 = 6 DECIMAL_5 = 5 @@ -829,6 +828,10 @@ def test_unknown_lags(self): with pytest.raises(ValueError): kpss(self.x, "c", nlags="unknown") + def test_none(self): + with pytest.warns(FutureWarning): + kpss(self.x, nlags=None) + class TestRUR: """ @@ -859,21 +862,25 @@ def simple_rur(self, x, store=False): # Table from [1] has been replicated using 200,000 samples # Critical values for new n_obs values have been identified pvals = [0.01, 0.025, 0.05, 0.10, 0.90, 0.95] - n = np.array([25, 50, 100, 150, 200, 250, 500, 1000, 2000, 3000, 4000, 5000]) - crit = np.array([ - [0.6626, 0.8126, 0.9192, 1.0712, 2.4863, 2.7312], - [0.7977, 0.9274, 1.0478, 1.1964, 2.6821, 2.9613], - [0.907, 1.0243, 1.1412, 1.2888, 2.8317, 3.1393], - [0.9543, 1.0768, 1.1869, 1.3294, 2.8915, 3.2049], - [0.9833, 1.0984, 1.2101, 1.3494, 2.9308, 
3.2482], - [0.9982, 1.1137, 1.2242, 1.3632, 2.9571, 3.2482], - [1.0494, 1.1643, 1.2712, 1.4076, 3.0207, 3.3584], - [1.0846, 1.1959, 1.2988, 1.4344, 3.0653, 3.4073], - [1.1121, 1.2200, 1.3230, 1.4556, 3.0948, 3.4439], - [1.1204, 1.2295, 1.3318, 1.4656, 3.1054, 3.4632], - [1.1309, 1.2347, 1.3318, 1.4693, 3.1165, 3.4717], - [1.1377, 1.2402, 1.3408, 1.4729, 3.1252, 3.4807] - ]) + n = np.array( + [25, 50, 100, 150, 200, 250, 500, 1000, 2000, 3000, 4000, 5000] + ) + crit = np.array( + [ + [0.6626, 0.8126, 0.9192, 1.0712, 2.4863, 2.7312], + [0.7977, 0.9274, 1.0478, 1.1964, 2.6821, 2.9613], + [0.907, 1.0243, 1.1412, 1.2888, 2.8317, 3.1393], + [0.9543, 1.0768, 1.1869, 1.3294, 2.8915, 3.2049], + [0.9833, 1.0984, 1.2101, 1.3494, 2.9308, 3.2482], + [0.9982, 1.1137, 1.2242, 1.3632, 2.9571, 3.2482], + [1.0494, 1.1643, 1.2712, 1.4076, 3.0207, 3.3584], + [1.0846, 1.1959, 1.2988, 1.4344, 3.0653, 3.4073], + [1.1121, 1.2200, 1.3230, 1.4556, 3.0948, 3.4439], + [1.1204, 1.2295, 1.3318, 1.4656, 3.1054, 3.4632], + [1.1309, 1.2347, 1.3318, 1.4693, 3.1165, 3.4717], + [1.1377, 1.2402, 1.3408, 1.4729, 3.1252, 3.4807], + ] + ) # Interpolation for nobs inter_crit = np.zeros((1, crit.shape[1])) @@ -917,9 +924,16 @@ def simple_rur(self, x, store=False): direction = "larger" if direction: - warnings.warn(warn_msg.format(direction=direction), InterpolationWarning) + warnings.warn( + warn_msg.format(direction=direction), InterpolationWarning + ) - crit_dict = {"10%": inter_crit[0, 3], "5%": inter_crit[0, 2], "2.5%": inter_crit[0, 1], "1%": inter_crit[0, 0]} + crit_dict = { + "10%": inter_crit[0, 3], + "5%": inter_crit[0, 2], + "2.5%": inter_crit[0, 1], + "1%": inter_crit[0, 0], + } if store: from statsmodels.stats.diagnostic import ResultsStore @@ -941,21 +955,18 @@ def test_fail_nonvector_input(self, reset_randomstate): x = np.random.rand(20, 2) assert_raises(ValueError, range_unit_root_test, x) - def test_teststat(self): with pytest.warns(InterpolationWarning): rur_stat, _, _ = 
range_unit_root_test(self.x) simple_rur_stat, _, _ = self.simple_rur(self.x) assert_almost_equal(rur_stat, simple_rur_stat, DECIMAL_3) - def test_pval(self): with pytest.warns(InterpolationWarning): _, pval, _ = range_unit_root_test(self.x) _, simple_pval, _ = self.simple_rur(self.x) assert_equal(pval, simple_pval) - def test_store(self): with pytest.warns(InterpolationWarning): _, _, _, store = range_unit_root_test(self.x, True)
Statsmodels 0.13 statsmodels.tsa.stattools.kpss nlags=None behavior #### Describe the bug The default nlags for kpss was changed from None to "auto" in 0.13 (commit https://github.com/statsmodels/statsmodels/commit/3d6c3884b9804842ff039c8c5d63a9ccf519867a). This has 2 problems: 1. The docstring still says that nlags=None will behave like nlags="legacy" 2. The branch for None was removed, instead of setting nlags="auto" when None is provided. So now if None is passed explicitly the code assumes it is integer, and throws TypeError when it does `int(nlags)` here https://github.com/statsmodels/statsmodels/blob/v0.13.0/statsmodels/tsa/stattools.py#L1976. #### Code Sample, a copy-pastable example if possible ```python from statsmodels.tsa.stattools import kpss help(kpss) # shows None is allowed for nlags kpss([1,2,3], nlags=None) # TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType' ``` #### Expected Output The docstring should either not say that None is allowed or say that None is same as auto. The implementation should be changed to convert nlags=None to nlags="auto" if None is allowed. 
#### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.9.7.final.0 statsmodels =========== Installed: 0.13.0 (D:\ss\Miniconda3\lib\site-packages\statsmodels) Required Dependencies ===================== cython: 0.29.24 (D:\ss\Miniconda3\lib\site-packages\Cython) numpy: 1.21.2 (D:\ss\Miniconda3\lib\site-packages\numpy) scipy: 1.7.1 (D:\ss\Miniconda3\lib\site-packages\scipy) pandas: 1.3.3 (D:\ss\Miniconda3\lib\site-packages\pandas) dateutil: 2.8.2 (D:\ss\Miniconda3\lib\site-packages\dateutil) patsy: 0.5.2 (D:\ss\Miniconda3\lib\site-packages\patsy) Optional Dependencies ===================== matplotlib: 3.4.3 (D:\ss\Miniconda3\lib\site-packages\matplotlib) backend: Qt5Agg cvxopt: Not installed joblib: 1.1.0 (D:\ss\Miniconda3\lib\site-packages\joblib) Developer Tools ================ IPython: 7.28.0 (D:\ss\Miniconda3\lib\site-packages\IPython) jinja2: 3.0.2 (D:\ss\Miniconda3\lib\site-packages\jinja2) sphinx: 4.2.0 (D:\ss\Miniconda3\lib\site-packages\sphinx) pygments: 2.10.0 (D:\ss\Miniconda3\lib\site-packages\pygments) pytest: 6.2.5 (D:\ss\Miniconda3\lib\site-packages\pytest) virtualenv: Not installed </details>
Thanks. The `None` options should have been removed.
"2021-10-13T09:16:27Z"
0.13dev
[ "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_gnpdef_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_stkprc_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_regression_type", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_autolag_type", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnpq_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rand10000_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnp_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[Aic]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[AIC]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_trim_value", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_array_shape", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[aic]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_subset_length[2-41-0.047619047619047616]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[two-sided-41-0.047619047619047616]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_subset_length[0.5-10-0.09048484886749095]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_2d_input_with_missing_values", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[increasing-41-0.023809523809523808]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_use_chi2", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[decreasing-0.024390243902439025-0.9761904761904762]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_1d_input", 
"statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_qstat_none", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_drop", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_raise", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_conservative", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_none", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_yw", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols_inefficient", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ld", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestCoint_t::test_tstat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_qstat", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_confint", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_pval", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_fail_nonvector_input", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_store", "statsmodels/tsa/tests/test_stattools.py::test_coint", "statsmodels/tsa/tests/test_stattools.py::test_coint_identical_series", 
"statsmodels/tsa/tests/test_stattools.py::test_coint_perfect_collinearity", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_pandasacovf", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-True-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf2d", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-none]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_ar", "statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic_failure", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_maxlag_too_large", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic", "statsmodels/tsa/tests/test_stattools.py::test_acf_fft_dataframe", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_levinson_durbin", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-False]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-True-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[False-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-False]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-False-False]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-False-True]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-True-True]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[True-True]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-False-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[True-False]", "statsmodels/tsa/tests/test_stattools.py::test_pacf_burg_error", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-True-True]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_rtol", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-drop]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_filter_kalman_filter", "statsmodels/tsa/tests/test_stattools.py::test_acovf_error", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_pandas", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-False-True]", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_short_series", "statsmodels/tsa/tests/test_stattools.py::test_pacf_burg", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_levinson_durbin_acov", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset2]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset1]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset0]", "statsmodels/tsa/tests/test_stattools.py::test_acf_conservate_nanops", "statsmodels/tsa/tests/test_stattools.py::test_coint_auto_tstat", "statsmodels/tsa/tests/test_stattools.py::test_pacf_nlags_error", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset3]", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_teststat", 
"statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_zero_lag", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_finite_check", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality_single", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_store_str", "statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_qstat", "statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_legacy_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_autolags_does_not_assign_lags_equal_to_nobs", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_unclear_hypothesis", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_pval", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_store", 
"statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_unknown_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_nonvector_input" ]
[ "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_none" ]
Python
[]
[]
statsmodels/statsmodels
7,998
statsmodels__statsmodels-7998
[ "7985" ]
a0b2bef5b2555c70a87785d8bac5c869e511e12a
diff --git a/statsmodels/distributions/copula/transforms.py b/statsmodels/distributions/copula/transforms.py --- a/statsmodels/distributions/copula/transforms.py +++ b/statsmodels/distributions/copula/transforms.py @@ -8,6 +8,7 @@ License: BSD-3 """ +import warnings import numpy as np from scipy.special import expm1 @@ -24,7 +25,10 @@ class TransfFrank(Transforms): def evaluate(self, t, theta): t = np.asarray(t) - return - (np.log(-expm1(-theta*t)) - np.log(-expm1(-theta))) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + val = -(np.log(-expm1(-theta*t)) - np.log(-expm1(-theta))) + return val # return - np.log(expm1(-theta*t) / expm1(-theta)) def inverse(self, phi, theta): diff --git a/statsmodels/distributions/tools.py b/statsmodels/distributions/tools.py --- a/statsmodels/distributions/tools.py +++ b/statsmodels/distributions/tools.py @@ -6,6 +6,7 @@ License: BSD-3 """ +import warnings import numpy as np from scipy import interpolate, stats @@ -324,13 +325,19 @@ def _eval_bernstein_1d(x, fvals, method="binom"): n = k_terms - 1. 
if method.lower() == "binom": - poly_base = stats.binom.pmf(k, n, xx[..., None]) + # Divide by 0 RuntimeWarning here + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + poly_base = stats.binom.pmf(k, n, xx[..., None]) bp_values = (fvals * poly_base).sum(-1) elif method.lower() == "bpoly": bpb = interpolate.BPoly(fvals[:, None], [0., 1]) bp_values = bpb(x) elif method.lower() == "beta": - poly_base = stats.beta.pdf(xx[..., None], k + 1, n - k + 1) / (n + 1) + # Divide by 0 RuntimeWarning here + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + poly_base = stats.beta.pdf(xx[..., None], k + 1, n - k + 1) / (n + 1) bp_values = (fvals * poly_base).sum(-1) else: raise ValueError("method not recogized") diff --git a/statsmodels/stats/proportion.py b/statsmodels/stats/proportion.py --- a/statsmodels/stats/proportion.py +++ b/statsmodels/stats/proportion.py @@ -8,19 +8,21 @@ """ from statsmodels.compat.python import lzip + import numpy as np -from scipy import stats, optimize -from sys import float_info +from scipy import optimize, stats -from statsmodels.stats.base import AllPairsResults -from statsmodels.tools.sm_exceptions import HypothesisTestWarning +from statsmodels.stats.base import AllPairsResults, HolderTuple from statsmodels.stats.weightstats import _zstat_generic2 -from statsmodels.stats.base import HolderTuple +from statsmodels.tools.sm_exceptions import HypothesisTestWarning from statsmodels.tools.testing import Holder +FLOAT_INFO = np.finfo(float) + def proportion_confint(count, nobs, alpha=0.05, method='normal'): - '''confidence interval for a binomial proportion + """ + Confidence interval for a binomial proportion Parameters ---------- @@ -76,17 +78,28 @@ def proportion_confint(count, nobs, alpha=0.05, method='normal'): Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001). "Interval Estimation for a Binomial Proportion", Statistical Science 16 (2): 101–133. doi:10.1214/ss/1009213286. 
- - ''' + """ pd_index = getattr(count, 'index', None) if pd_index is not None and callable(pd_index): # this rules out lists, lists have an index method pd_index = None - count = np.asarray(count) - nobs = np.asarray(nobs) - q_ = count * 1. / nobs + def _check(x: np.ndarray, name: str) -> np.ndarray: + if np.issubdtype(x.dtype, np.integer): + return x + y = x.astype(np.int64, casting="unsafe") + if np.any(y != x): + raise ValueError( + f"{name} must have an integral dtype. Found data with " + f"dtype {x.dtype}" + ) + return y + + count = _check(np.asarray(count), "count") + nobs = _check(np.asarray(nobs), "nobs") + + q_ = count / nobs alpha_2 = 0.5 * alpha if method == 'normal': @@ -95,18 +108,53 @@ def proportion_confint(count, nobs, alpha=0.05, method='normal'): ci_low = q_ - dist ci_upp = q_ + dist + + elif method == 'binom_test': # inverting the binomial test def func(qi): - return stats.binom_test(q_ * nobs, nobs, p=qi) - alpha + return stats.binom_test(count, nobs, p=qi) - alpha + + def _bound(qi, lower=True): + """ + Try hard to find a bound different from eps or 1 - eps + + Parameters + ---------- + qi : float + The empirical success rate + lower : bool + Whether to fund a lower bound for the left side of the CI + + Returns + ------- + float + The coarse bound + """ + default = FLOAT_INFO.eps if lower else 1.0 - FLOAT_INFO.eps + + def step(v): + return v / 8 if lower else v + (1.0 - v) / 8 + + x = step(qi) + w = func(x) + cnt = 1 + while w > 0 and cnt < 10: + x = step(x) + w = func(x) + cnt += 1 + return x if cnt < 10 else default + if count == 0: ci_low = 0 else: - ci_low = optimize.brentq(func, float_info.min, q_) + lower_bnd = _bound(q_, lower=True) + ci_low = optimize.brentq(func, lower_bnd, q_) if count == nobs: ci_upp = 1 else: - ci_upp = optimize.brentq(func, q_, 1. 
- float_info.epsilon) + upper_bnd = _bound(q_, lower=False) + ci_upp = optimize.brentq(func, q_, upper_bnd) elif method == 'beta': ci_low = stats.beta.ppf(alpha_2, count, nobs - count + 1)
diff --git a/statsmodels/distributions/tests/test_bernstein.py b/statsmodels/distributions/tests/test_bernstein.py --- a/statsmodels/distributions/tests/test_bernstein.py +++ b/statsmodels/distributions/tests/test_bernstein.py @@ -21,6 +21,9 @@ def test_bernstein_distribution_1d(): grid = dt._Grid([501]) + loc = grid.x_flat == 0 + grid.x_flat[loc] = grid.x_flat[~loc].min() / 2 + grid.x_flat[grid.x_flat == 1] = 1 - grid.x_flat.min() distr = stats.beta(3, 5) cdf_g = distr.cdf(np.squeeze(grid.x_flat)) diff --git a/statsmodels/stats/tests/test_proportion.py b/statsmodels/stats/tests/test_proportion.py --- a/statsmodels/stats/tests/test_proportion.py +++ b/statsmodels/stats/tests/test_proportion.py @@ -7,24 +7,29 @@ """ import warnings -import pytest import numpy as np +from numpy.testing import ( + assert_allclose, + assert_almost_equal, + assert_array_less, + assert_equal, + assert_raises, +) import pandas as pd -from numpy.testing import (assert_almost_equal, assert_equal, - assert_array_less, assert_raises, assert_allclose) - -from statsmodels.stats.proportion import (proportion_confint, - confint_proportions_2indep, - multinomial_proportions_confint, - score_test_proportions_2indep, - power_proportions_2indep, - samplesize_proportions_2indep_onetail, - ) +import pytest + import statsmodels.stats.proportion as smprop +from statsmodels.stats.proportion import ( + confint_proportions_2indep, + multinomial_proportions_confint, + power_proportions_2indep, + proportion_confint, + samplesize_proportions_2indep_onetail, + score_test_proportions_2indep, +) from statsmodels.tools.sm_exceptions import HypothesisTestWarning from statsmodels.tools.testing import Holder - probci_methods = {'agresti_coull': 'agresti-coull', 'normal': 'asymptotic', 'beta': 'exact', @@ -909,3 +914,34 @@ def test_power_2indep(): ratio=1, alpha=0.05, value=0, alternative='two-sided') assert_allclose(n2, n, rtol=1e-13) + + [email protected]("count", np.arange(10, 90, 5)) [email protected]("method", 
list(probci_methods.keys()) + ["binom_test"]) +def test_ci_symmetry(count, method): + n = 100 + a = proportion_confint(count, n, method=method) + b = proportion_confint(n - count, n, method=method) + assert_allclose(np.array(a), 1.0 - np.array(b[::-1])) + + [email protected]("nobs", [47, 50]) [email protected]("count", np.arange(48)) +def test_ci_symmetry_binom_test(nobs, count): + a = proportion_confint(count, nobs, method="binom_test") + b = proportion_confint(nobs - count, nobs, method="binom_test") + assert_allclose(np.array(a), 1.0 - np.array(b[::-1])) + + +def test_int_check(): + with pytest.raises(ValueError): + proportion_confint(10.5, 20) + with pytest.raises(ValueError): + proportion_confint(10, 20.5) + with pytest.raises(ValueError): + proportion_confint(np.array([10.3]), 20) + a = proportion_confint(21.0, 47, method="binom_test") + b = proportion_confint(21, 47, method="binom_test") + c = proportion_confint(21, 47.0, method="binom_test") + assert_allclose(a, b) + assert_allclose(a, c)
proportion_confint inaccurate for method="binom_test" There is a symmetry for binomial proportion confidence interval therefore following property should holds (approximately): proportion_confint(count, nobs, method="binom_test")[0] + proportion_confint(nobs-count, nobs, method="binom_test")[1] ==1.0 but for some cases this property does not hold, for example, the following code: ```python from statsmodels.stats.proportion import proportion_confint eps = 1e-8 for nobs in [47, 50]: for count in range(nobs+1): p1 = proportion_confint(count, nobs, method="binom_test") p2 = proportion_confint(nobs-count, nobs, method="binom_test") if abs(p1[0]+p2[1]-1.0)>eps: print(f"{nobs=} {count=} {p1[0]+p2[1]}") ``` gives ouput: ``` nobs=47 count=3 0.9899913901078568 nobs=47 count=6 0.9857792597568958 nobs=47 count=12 0.9864319871901812 nobs=47 count=16 0.9851553703015263 nobs=47 count=23 0.9802691065584829 nobs=47 count=24 0.980269106558583 nobs=47 count=31 0.978547082683319 nobs=47 count=35 0.9718877308702878 nobs=47 count=41 0.978042999703059 nobs=47 count=44 0.9680701802192785 nobs=50 count=21 0.9796386543796607 nobs=50 count=29 0.9798450168011505 ``` Which indicate that there is some bug. 
#### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.8.5.final.0 statsmodels =========== Installed: 0.13.1 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.17.3 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\numpy) scipy: 1.3.3 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\scipy) pandas: 0.25.3 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\pandas) dateutil: 2.8.1 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\dateutil) patsy: 0.5.2 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\patsy) Optional Dependencies ===================== matplotlib: 3.2.0rc1 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\matplotlib) backend: TkAgg cvxopt: Not installed joblib: Not installed Developer Tools ================ IPython: 7.17.0 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\IPython) jinja2: 2.11.2 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\jinja2) sphinx: Not installed pygments: 2.6.1 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\pygments) pytest: Not installed virtualenv: Not installed </details>
Thanks for reporting I can confirm this, but don't see any obvious problem. It's either because of some discreteness in scipy.stats.binom_test or in scipy.optimize.brentq. The differences in the confint are nontrivial, but inverting binom_test is not one of the recommended confints (both in the literature and in terms of the quickly done implementation). But I don't have time to dig into this right now (It's too far away from what I'm currently working on.) label `bug-wrong` because we should have consistent results for symmetry. However my guess is that they choose different solution because of non-monotonicities or discreteness, which are often a problem with hypothesis tests for proportions. Requirement for size is only `<= alpha` but not `== alpha` because of the discreteness of the sample space.
"2022-01-07T15:18:48Z"
0.13dev
[ "statsmodels/stats/tests/test_proportion.py::TestProportion::test_proptest", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_pairwiseproptest", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_number_pairs_1493", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_scalar", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_default_values", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[jeffreys]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[wilson]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[beta]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[normal]", "statsmodels/stats/tests/test_proportion.py::test_proportion_effect_size", "statsmodels/stats/tests/test_proportion.py::test_confint_multinomial_proportions", "statsmodels/stats/tests/test_proportion.py::test_confint_multinomial_proportions_zeros", "statsmodels/stats/tests/test_proportion.py::test_samplesize_confidenceinterval_prop", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion", "statsmodels/stats/tests/test_proportion.py::test_multinomial_proportions_errors", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[agresti_coull]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count45-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count9]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count1]", "statsmodels/stats/tests/test_proportion.py::test_confint_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count4-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count47-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count40-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count12-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count15-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count5-47]", "statsmodels/stats/tests/test_proportion.py::test_ztost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count46-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count11-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count33-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count40-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count7]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count28-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count6]", "statsmodels/stats/tests/test_proportion.py::test_test_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count14]", "statsmodels/stats/tests/test_proportion.py::test_power_ztost_prop_norm", "statsmodels/stats/tests/test_proportion.py::test_binom_tost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count14-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count28-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count27-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count18-50]", "statsmodels/stats/tests/test_proportion.py::test_equivalence_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count21-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count39-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count19-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count1-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count42-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count12]", "statsmodels/stats/tests/test_proportion.py::test_power_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count11-50]", "statsmodels/stats/tests/test_proportion.py::test_proportion_ztests", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count30-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count9-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count22-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count41-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count31-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count32-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count34-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count0-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count6-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count2-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count26-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count34-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count8-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count43-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count8-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count10-50]", "statsmodels/stats/tests/test_proportion.py::test_score_confint_koopman_nam", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count38-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count30-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count44-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count32-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count24-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count37-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count13-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count13]", 
"statsmodels/stats/tests/test_proportion.py::test_binom_rejection_interval", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count9-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count0-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count20-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count17-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count13-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count13]", "statsmodels/stats/tests/test_proportion.py::test_power_ztost_prop", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count45-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count15]", "statsmodels/stats/tests/test_proportion.py::test_confint_2indep_propcis", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count37-50]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count26-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count5-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count15-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count36-50]", "statsmodels/stats/tests/test_proportion.py::test_power_binom_tost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count4-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count7-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count2-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count47-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count3-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count10-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count38-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count23-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count27-50]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count25-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count29-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count17-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count18-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count39-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count35-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count20-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count46-47]", "statsmodels/stats/tests/test_proportion.py::test_binom_test", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count43-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count3]", "statsmodels/stats/tests/test_proportion.py::test_score_test_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count36-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count25-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count33-50]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count14-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count22-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count19-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count1-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count7-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count42-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count16-50]", "statsmodels/distributions/tests/test_bernstein.py::TestBernsteinBeta2d::test_rvs", "statsmodels/distributions/tests/test_bernstein.py::TestBernsteinBeta2d::test_basic", "statsmodels/distributions/tests/test_bernstein.py::TestBernsteinBeta2dd::test_rvs", 
"statsmodels/distributions/tests/test_bernstein.py::TestBernsteinBeta2dd::test_basic", "statsmodels/distributions/tests/test_bernstein.py::test_bernstein_distribution_2d", "statsmodels/distributions/tests/test_bernstein.py::test_bernstein_distribution_1d" ]
[ "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count12-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count24-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count29-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count6-47]", "statsmodels/stats/tests/test_proportion.py::test_int_check", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count31-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count21-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count35-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count41-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count3-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count23-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count16-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count44-47]" ]
Python
[]
[]
statsmodels/statsmodels
8,006
statsmodels__statsmodels-8006
[ "7981" ]
66ecf2ed6716ade72349d49a6ebd74e0c57e8d93
diff --git a/statsmodels/tools/typing.py b/statsmodels/tools/typing.py new file mode 100644 --- /dev/null +++ b/statsmodels/tools/typing.py @@ -0,0 +1,12 @@ +from __future__ import annotations + +from typing import Sequence, Union + +import numpy as np +from pandas import DataFrame, Series + +ArrayLike1D = Union[Sequence[Union[float, int]], np.ndarray, Series] +ArrayLike2D = Union[ + Sequence[Sequence[Union[float, int]]], np.ndarray, DataFrame +] +ArrayLike = Union[ArrayLike1D, ArrayLike2D] diff --git a/statsmodels/tsa/ar_model.py b/statsmodels/tsa/ar_model.py --- a/statsmodels/tsa/ar_model.py +++ b/statsmodels/tsa/ar_model.py @@ -9,9 +9,10 @@ ) from collections.abc import Iterable +import datetime import datetime as dt from types import SimpleNamespace -from typing import List, Tuple +from typing import Any, List, Tuple, Union import warnings import numpy as np @@ -25,6 +26,7 @@ from statsmodels.tools.decorators import cache_readonly, cache_writable from statsmodels.tools.docstring import Docstring, remove_parameters from statsmodels.tools.sm_exceptions import SpecificationWarning +from statsmodels.tools.typing import ArrayLike, ArrayLike1D, ArrayLike2D from statsmodels.tools.validation import ( array_like, bool_like, @@ -70,12 +72,12 @@ """ -def sumofsq(x, axis=0): +def sumofsq(x: np.ndarray, axis: int = 0) -> float | np.ndarray: """Helper function to calculate sum of squares along first axis""" return np.sum(x ** 2, axis=axis) -def _get_period(data, index_freq): +def _get_period(data: pd.DatetimeIndex | pd.PeriodIndex, index_freq) -> None: """Shared helper to get period from frequenc or raise""" if data.freq: return freq_to_period(index_freq) @@ -179,17 +181,17 @@ class AutoReg(tsa_model.TimeSeriesModel): def __init__( self, - endog, - lags, - trend="c", - seasonal=False, - exog=None, - hold_back=None, - period=None, - missing="none", + endog: ArrayLike1D, + lags: int | list[int], + trend: str = "c", + seasonal: bool = False, + exog: ArrayLike2D | None = None, + 
hold_back: int | None = None, + period: int | None = None, + missing: str = "none", *, - deterministic=None, - old_names=False, + deterministic: DeterministicProcess | None = None, + old_names: bool = False, ): super().__init__(endog, exog, None, None, missing=missing) self._trend = string_like( @@ -241,47 +243,47 @@ def __init__( self.data.xnames = self.exog_names @property - def ar_lags(self): + def ar_lags(self) -> list[int] | None: """The autoregressive lags included in the model""" lags = list(self._lags) return None if not lags else lags @property - def hold_back(self): + def hold_back(self) -> int: """The number of initial obs. excluded from the estimation sample.""" return self._hold_back @property - def trend(self): + def trend(self) -> str: """The trend used in the model.""" return self._trend @property - def seasonal(self): + def seasonal(self) -> bool: """Flag indicating that the model contains a seasonal component.""" return self._seasonal @property - def deterministic(self): + def deterministic(self) -> DeterministicProcess | None: """The deterministic used to construct the model""" return self._deterministics if self._user_deterministic else None @property - def period(self): + def period(self) -> int | None: """The period of the seasonal component.""" return self._period @property - def df_model(self): + def df_model(self) -> int: """The model degrees of freedom.""" return self._x.shape[1] @property - def exog_names(self): + def exog_names(self) -> list[str]: """Names of exogenous variables included in model""" return self._exog_names - def initialize(self): + def initialize(self) -> None: """Initialize the model (no-op).""" pass @@ -312,7 +314,7 @@ def _check_lags(self) -> Tuple[List[int], int]: ) return list(lags), int(hold_back) - def _setup_regressors(self): + def _setup_regressors(self) -> None: maxlag = self._maxlag hold_back = self._hold_back exog_names = [] @@ -365,7 +367,12 @@ def _setup_regressors(self): self._y, self._x = y, x 
self._exog_names = exog_names - def fit(self, cov_type="nonrobust", cov_kwds=None, use_t=False): + def fit( + self, + cov_type: str = "nonrobust", + cov_kwds: dict[str, Any] | None = None, + use_t: bool = False, + ) -> AutoRegResults: """ Estimate the model parameters. @@ -448,11 +455,11 @@ def fit(self, cov_type="nonrobust", cov_kwds=None, use_t=False): return AutoRegResultsWrapper(res) - def _resid(self, params): + def _resid(self, params: ArrayLike) -> np.ndarray: params = array_like(params, "params", ndim=2) return self._y.squeeze() - (self._x @ params).squeeze() - def loglike(self, params): + def loglike(self, params: ArrayLike) -> float: """ Log-likelihood of model. @@ -472,7 +479,7 @@ def loglike(self, params): llf = -(nobs / 2) * (np.log(2 * np.pi) + np.log(ssr / nobs) + 1) return llf - def score(self, params): + def score(self, params: ArrayLike) -> np.ndarray: """ Score vector of model. @@ -491,7 +498,7 @@ def score(self, params): resid = self._resid(params) return self._x.T @ resid - def information(self, params): + def information(self, params: ArrayLike) -> np.ndarray: """ Fisher information matrix of model. @@ -511,7 +518,7 @@ def information(self, params): sigma2 = resid @ resid / self.nobs return (self._x.T @ self._x) * (1 / sigma2) - def hessian(self, params): + def hessian(self, params: ArrayLike) -> np.ndarray: """ The Hessian matrix of the model. 
@@ -527,7 +534,9 @@ def hessian(self, params): """ return -self.information(params) - def _setup_oos_forecast(self, add_forecasts, exog_oos): + def _setup_oos_forecast( + self, add_forecasts: int, exog_oos: ArrayLike2D + ) -> np.ndarray: x = np.zeros((add_forecasts, self._x.shape[1])) oos_exog = self._deterministics.out_of_sample(steps=add_forecasts) n_deterministic = oos_exog.shape[1] @@ -538,7 +547,9 @@ def _setup_oos_forecast(self, add_forecasts, exog_oos): x[:, loc:] = exog_oos[:add_forecasts] return x - def _wrap_prediction(self, prediction, start, end, pad): + def _wrap_prediction( + self, prediction: np.ndarray, start: int, end: int, pad: int + ) -> pd.Series: prediction = np.hstack([np.full(pad, np.nan), prediction]) n_values = end - start + pad if not isinstance(self.data.orig_endog, (pd.Series, pd.DataFrame)): @@ -558,8 +569,15 @@ def _wrap_prediction(self, prediction, start, end, pad): return pd.Series(prediction, index=index) def _dynamic_predict( - self, params, start, end, dynamic, num_oos, exog, exog_oos - ): + self, + params: ArrayLike, + start: int, + end: int, + dynamic: int, + num_oos: int, + exog: np.ndarray, + exog_oos: np.ndarray, + ) -> pd.Series: """ :param params: @@ -612,7 +630,9 @@ def _dynamic_predict( forecasts[h] = reg[h : h + 1] @ params return self._wrap_prediction(forecasts, start, end + 1 + num_oos, adj) - def _static_oos_predict(self, params, num_oos, exog_oos): + def _static_oos_predict( + self, params: ArrayLike, num_oos: int, exog_oos: ArrayLike2D + ) -> np.ndarray: new_x = self._setup_oos_forecast(num_oos, exog_oos) if self._maxlag == 0: return new_x @ params @@ -627,7 +647,15 @@ def _static_oos_predict(self, params, num_oos, exog_oos): forecasts[i] = new_x[i : i + 1] @ params return forecasts - def _static_predict(self, params, start, end, num_oos, exog, exog_oos): + def _static_predict( + self, + params: ArrayLike, + start: int, + end: int, + num_oos: int, + exog: ArrayLike2D, + exog_oos: ArrayLike2D, + ) -> pd.Series: """ 
Path for static predictions @@ -670,7 +698,21 @@ def _static_predict(self, params, start, end, num_oos, exog, exog_oos): prediction = np.hstack((in_sample, out_of_sample)) return self._wrap_prediction(prediction, start, end + 1 + num_oos, adj) - def _prepare_prediction(self, params, exog, exog_oos, start, end): + def _prepare_prediction( + self, + params: ArrayLike, + exog: ArrayLike2D, + exog_oos: ArrayLike2D, + start: int | str | datetime.datetime | pd.Timestamp | None, + end: int | str | datetime.datetime | pd.Timestamp | None, + ) -> tuple[ + np.ndarray, + Union[np.ndarray | None], + Union[np.ndarray | None], + int, + int, + int, + ]: params = array_like(params, "params") if not isinstance(exog, pd.DataFrame): exog = array_like(exog, "exog", ndim=2, optional=True) @@ -705,13 +747,13 @@ def _parse_dynamic(self, dynamic, start): def predict( self, - params, - start=None, - end=None, - dynamic=False, - exog=None, - exog_oos=None, - ): + params: ArrayLike, + start: int | str | datetime.datetime | pd.Timestamp | None = None, + end: int | str | datetime.datetime | pd.Timestamp | None = None, + dynamic: bool | int = False, + exog: ArrayLike2D | None = None, + exog_oos: ArrayLike2D | None = None, + ) -> pd.Series: """ In-sample prediction and out-of-sample forecasting. @@ -866,6 +908,8 @@ class AutoRegResults(tsa_model.TimeSeriesModelResults): An estimate of the scale of the model. 
use_t : bool, optional Whether use_t was set in fit + summary_text : str, optional + Additional text to append to results summary """ _cache = {} # for scale setter @@ -878,6 +922,7 @@ def __init__( normalized_cov_params=None, scale=1.0, use_t=False, + summary_text="", ): super().__init__(model, params, normalized_cov_params, scale) self._cache = {} @@ -893,6 +938,7 @@ def __init__( self._max_lag = 0 self._hold_back = self.model.hold_back self.cov_params_default = cov_params + self._summary_text = summary_text def initialize(self, model, params, **kwargs): """ @@ -1661,8 +1707,146 @@ def summary(self, alpha=0.05): ) smry.tables.append(roots_table) + if self._summary_text: + extra_txt = smry.extra_txt if smry.extra_txt is not None else [] + smry.add_extra_txt(extra_txt + [self._summary_text]) return smry + def apply(self, endog, exog=None, refit=False, fit_kwargs=None): + """ + Apply the fitted parameters to new data unrelated to the original data + + Creates a new result object using the current fitted parameters, + applied to a completely new dataset that is assumed to be unrelated to + the model's original data. The new results can then be used for + analysis or forecasting. + + Parameters + ---------- + endog : array_like + New observations from the modeled time-series process. + exog : array_like, optional + New observations of exogenous regressors, if applicable. + refit : bool, optional + Whether to re-fit the parameters, using the new dataset. + Default is False (so parameters from the current results object + are used to create the new results object). + fit_kwargs : dict, optional + Keyword arguments to pass to `fit` (if `refit=True`). + + Returns + ------- + AutoRegResults + Updated results object containing results for the new dataset. 
+ + See Also + -------- + statsmodels.tsa.statespace.mlemodel.MLEResults.apply + + Notes + ----- + The `endog` argument to this method should consist of new observations + that are not necessarily related to the original model's `endog` + dataset. + + Care is needed when using deterministic processes with cyclical + components such as seasonal dummies or Fourier series. These + deterministic components will align to the first observation + in the data and so it is essential that any new data have the + same initial period. + + Examples + -------- + >>> from statsmodels.tsa.ar_model import AutoReg + >>> index = pd.period_range(start='2000', periods=2, freq='A') + >>> original_observations = pd.Series([1.2, 1.5], index=index) + >>> mod = AutoReg(original_observations, lags=1, trend="n") + >>> res = mod.fit() + >>> print(res.params) + y.L1 1.300813 + dtype: float64 + >>> print(res.fittedvalues) + 2001 1.560976 + 2002 1.951220 + Freq: A-DEC, dtype: float64 + >>> print(res.forecast(1)) + 2003 2.601626 + Freq: A-DEC, dtype: float64 + + >>> new_index = pd.period_range(start='1980', periods=3, freq='A') + >>> new_observations = pd.Series([1.4, 0.3, 1.2], index=new_index) + >>> new_res = res.apply(new_observations) + >>> print(new_res.params) + y.L1 1.300813 + dtype: float64 + >>> print(new_res.fittedvalues) + 1980 1.1707 + 1981 1.3659 + 1982 0.2927 + Freq: A-DEC, dtype: float64 + >>> print(new_res.forecast(1)) + 1983 1.1707 + Freq: A-DEC, dtype: float64 + """ + existing = self.model + try: + mod = AutoReg( + endog, + lags=existing.ar_lags, + trend=existing.trend, + seasonal=existing.seasonal, + exog=exog, + hold_back=existing.hold_back, + period=existing.period, + deterministic=existing.deterministic, + old_names=False, + ) + except Exception as exc: + error = ( + "An exception occured during the creation of the cloned " + "AutoReg instance when applying the existing model " + "specification to the new data. The original traceback " + "appears below." 
+ ) + exc.args = (error,) + exc.args + raise exc.with_traceback(exc.__traceback__) + + if (mod.exog is None) != (existing.exog is None): + if existing.exog is not None: + raise ValueError( + "exog must be provided when the original model contained " + "exog variables" + ) + raise ValueError( + "exog must be None when the original model did not contain " + "exog variables" + ) + if ( + existing.exog is not None + and existing.exog.shape[1] != mod.exog.shape[1] + ): + raise ValueError( + f"The number of exog variables passed must match the original " + f"number of exog values ({existing.exog.shape[1]})" + ) + if refit: + fit_kwargs = {} if fit_kwargs is None else fit_kwargs + return mod.fit(**fit_kwargs) + smry_txt = ( + "Parameters and standard errors were estimated using a different " + "dataset and were then applied to this dataset." + ) + res = AutoRegResults( + mod, + self.params, + self.cov_params_default, + self.normalized_cov_params, + use_t=self.use_t, + summary_text=smry_txt, + ) + + return AutoRegResultsWrapper(res) + class AutoRegResultsWrapper(wrap.ResultsWrapper): _attrs = {} @@ -1851,7 +2035,14 @@ class AROrderSelectionResults(object): Contains the information criteria for all fitted model orders. 
""" - def __init__(self, model, ics, trend, seasonal, period): + def __init__( + self, + model: AutoReg, + ics: list[tuple[int, ...], tuple[float, float, float]], + trend: str, + seasonal: bool, + period: int | None, + ): self._model = model self._ics = ics self._trend = trend @@ -1865,27 +2056,27 @@ def __init__(self, model, ics, trend, seasonal, period): self._hqic = dict([(key, val[2]) for key, val in hqic]) @property - def model(self): + def model(self) -> AutoReg: """The model selected using the chosen information criterion.""" return self._model @property - def seasonal(self): + def seasonal(self) -> bool: """Flag indicating if a seasonal component is included.""" return self._seasonal @property - def trend(self): + def trend(self) -> str: """The trend included in the model selection.""" return self._trend @property - def period(self): + def period(self) -> int | None: """The period of the seasonal component.""" return self._period @property - def aic(self): + def aic(self) -> dict[tuple[int, ...], float]: """ The Akaike information criterion for the models fit. @@ -1896,7 +2087,7 @@ def aic(self): return self._aic @property - def bic(self): + def bic(self) -> dict[tuple[int, ...], float]: """ The Bayesian (Schwarz) information criteria for the models fit. @@ -1907,7 +2098,7 @@ def bic(self): return self._bic @property - def hqic(self): + def hqic(self) -> dict[tuple[int, ...], float]: """ The Hannan-Quinn information criteria for the models fit. @@ -1918,6 +2109,6 @@ def hqic(self): return self._hqic @property - def ar_lags(self): + def ar_lags(self) -> list[int]: """The lags included in the selected model.""" return self._model.ar_lags
diff --git a/statsmodels/tsa/tests/test_ar.py b/statsmodels/tsa/tests/test_ar.py --- a/statsmodels/tsa/tests/test_ar.py +++ b/statsmodels/tsa/tests/test_ar.py @@ -18,7 +18,11 @@ from statsmodels.regression.linear_model import OLS from statsmodels.tools.sm_exceptions import SpecificationWarning, ValueWarning from statsmodels.tools.tools import Bunch -from statsmodels.tsa.ar_model import AutoReg, ar_select_order +from statsmodels.tsa.ar_model import ( + AutoReg, + AutoRegResultsWrapper, + ar_select_order, +) from statsmodels.tsa.arima_process import arma_generate_sample from statsmodels.tsa.deterministic import ( DeterministicProcess, @@ -439,6 +443,7 @@ def test_parameterless_autoreg(): "t_test_pairwise", "wald_test", "wald_test_terms", + "apply", ): continue attr = getattr(res, attr) @@ -1184,3 +1189,55 @@ def test_removal(ar2): AR(ar2) with pytest.raises(NotImplementedError): ARResults(ar2) + + +def test_autoreg_apply(ols_autoreg_result): + res, _ = ols_autoreg_result + y = res.model.endog + n = y.shape[0] // 2 + y = y[:n] + x = res.model.exog + if x is not None: + x = x[:n] + res_apply = res.apply(endog=y, exog=x) + assert "using a different" in str(res_apply.summary()) + assert isinstance(res_apply, AutoRegResultsWrapper) + assert_allclose(res.params, res_apply.params) + exog_oos = None + if res.model.exog is not None: + exog_oos = res.model.exog[-10:] + fcasts_apply = res_apply.forecast(10, exog=exog_oos) + assert isinstance(fcasts_apply, np.ndarray) + assert fcasts_apply.shape == (10,) + + res_refit = res.apply(endog=y, exog=x, refit=True) + assert not np.allclose(res.params, res_refit.params) + assert not np.allclose(res.llf, res_refit.llf) + assert res_apply.fittedvalues.shape == res_refit.fittedvalues.shape + assert not np.allclose(res_apply.llf, res_refit.llf) + if res.model.exog is None: + fcasts_refit = res_refit.forecast(10, exog=exog_oos) + assert isinstance(fcasts_refit, np.ndarray) + assert fcasts_refit.shape == (10,) + assert not 
np.allclose(fcasts_refit, fcasts_apply) + + +def test_autoreg_apply_exception(reset_randomstate): + y = np.random.standard_normal(250) + mod = AutoReg(y, lags=10) + res = mod.fit() + with pytest.raises(ValueError, match="An exception occured"): + res.apply(y[:5]) + + x = np.random.standard_normal((y.shape[0], 3)) + res = AutoReg(y, lags=1, exog=x).fit() + with pytest.raises(ValueError, match="exog must be provided"): + res.apply(y[50:150]) + x = np.random.standard_normal((y.shape[0], 3)) + res = AutoReg(y, lags=1, exog=x).fit() + with pytest.raises(ValueError, match="The number of exog"): + res.apply(y[50:150], exog=x[50:150, :2]) + + res = AutoReg(y, lags=1).fit() + with pytest.raises(ValueError, match="exog must be None"): + res.apply(y[50:150], exog=x[50:150])
Question: How to apply AutoReg results to new data / help with AutoReg `initialize` Hello, first thanks for great job. I have a question. It's not feature request nor bug. What i want to do with autoreg - I want to be able to just update model instead of new fit (lets call it partial fit or incremental learning) - I want to be able to make predictions on data that has no relation to fitted data (don't know index) - I want to be able to has similar iterface to use score evaluation and cross-validation as in sklearn Why i think it could help others In models based from MLEModel (ARIMA, SARIMAX) I'm able to use `apply` function so I am able to fit the model and then use it for unrelated data. I'm also able to update model with new data with `append`, therefore I'm able to use everything i need Example: ``` from statsmodels.tsa.api import ARIMA data = np.array(range(200)) order = (4, 2, 1) model = ARIMA(data, order=order) fitted_model = model.fit() prediction = fitted_model.forecast(7) new_data = np.array(range(600, 800)) fitted_model = fitted_model.apply(new_data) new_prediction = fitted_model.forecast(7) print(prediction) print(new_prediction) ``` Is there any way how to achieve something similar in autoreg model? My guess is to use `initialize` - create model with already fitted params and then somehow add the data. Is it possible to add simple example to [initialize docs](https://www.statsmodels.org/dev/generated/statsmodels.tsa.ar_model.AutoRegResults.initialize.html#statsmodels.tsa.ar_model.AutoRegResults.initialize)? In apply, there is lovely example, in initialization, there are only described parameters. Similar docs could be applied in other models like `statsmodels.tsa.holtwinters.HoltWintersResults` etc. also And mainly - Is it possible somehow to achieve something similar as apply in autoreg module? I guess somehow data can be added to initialized model... Thanks... Dan Malachov
+1, I'm coming here with the same question from `sktime`, from this issue: https://github.com/alan-turing-institute/sktime/issues/1788 (what do we put in `update` of the forecaster if `update_params=False`) I think it might not be possible, looking at the docs, but I'm not sure. Having the functionality in `statemodels` seems like the "right" place, but we couldn't find it there.
"2022-01-10T13:47:01Z"
0.13dev
[ "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_pvalues", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_llf", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_params", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_summary", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_pickle", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_fpe", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_bse", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSConstant::test_predict", "statsmodels/tsa/tests/test_ar.py::test_dynamic_forecast_smoke[lags:", "statsmodels/tsa/tests/test_ar.py::test_autoreg_predict_smoke[lags:", "statsmodels/tsa/tests/test_ar.py::test_equiv_ols_autoreg[AR:", "statsmodels/tsa/tests/test_ar.py::test_conf_int_ols_autoreg[AR:", "statsmodels/tsa/tests/test_ar.py::test_other_tests_autoreg[AR:", "statsmodels/tsa/tests/test_ar.py::test_f_test_ols_autoreg[AR:", "statsmodels/tsa/tests/test_ar.py::test_autoreg_smoke_plots[lags:", "statsmodels/tsa/tests/test_ar.py::test_spec_errors", "statsmodels/tsa/tests/test_ar.py::test_ar_select_order_smoke", "statsmodels/tsa/tests/test_ar.py::test_predict_errors", "statsmodels/tsa/tests/test_ar.py::test_parameterless_autoreg", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag12]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_roots", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag9]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag11]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag13]", "statsmodels/tsa/tests/test_ar.py::test_ar_model_predict", "statsmodels/tsa/tests/test_ar.py::test_autoreg_named_series[False]", "statsmodels/tsa/tests/test_ar.py::test_dynamic_predictions", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag5]", "statsmodels/tsa/tests/test_ar.py::test_diagnostic_summary_short", 
"statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag8]", "statsmodels/tsa/tests/test_ar.py::test_dynamic_against_sarimax", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag14]", "statsmodels/tsa/tests/test_ar.py::test_invalid_dynamic", "statsmodels/tsa/tests/test_ar.py::test_old_names", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag0]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_series", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag1]", "statsmodels/tsa/tests/test_ar.py::test_dynamic_predictions_oos", "statsmodels/tsa/tests/test_ar.py::test_autoreg_forecast_period_index", "statsmodels/tsa/tests/test_ar.py::test_autoreg_named_series[True]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag15]", "statsmodels/tsa/tests/test_ar.py::test_predict_exog", "statsmodels/tsa/tests/test_ar.py::test_removal", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag10]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_predict_forecast_equiv", "statsmodels/tsa/tests/test_ar.py::test_predict_irregular_ar", "statsmodels/tsa/tests/test_ar.py::test_forecast_start_end_equiv[False]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag7]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_summary_corner[True]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_no_variables", "statsmodels/tsa/tests/test_ar.py::test_predict_seasonal", "statsmodels/tsa/tests/test_ar.py::test_exog_prediction", "statsmodels/tsa/tests/test_ar.py::test_deterministic", "statsmodels/tsa/tests/test_ar.py::test_equiv_dynamic", "statsmodels/tsa/tests/test_ar.py::test_autoreg_resids", "statsmodels/tsa/tests/test_ar.py::test_autoreg_summary_corner[False]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_constant_column_trend", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag6]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_start[21]", 
"statsmodels/tsa/tests/test_ar.py::test_forecast_start_end_equiv[True]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_plot_err", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag4]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag2]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_start[25]", "statsmodels/tsa/tests/test_ar.py::test_autoreg_score", "statsmodels/tsa/tests/test_ar.py::test_autoreg_info_criterion[lag3]", "statsmodels/tsa/tests/test_ar.py::test_ar_order_select", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_llf", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_pickle", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_fpe", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_summary", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_pvalues", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_predict", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_params", "statsmodels/tsa/tests/test_ar.py::TestAutoRegOLSNoConstant::test_bse" ]
[ "statsmodels/tsa/tests/test_ar.py::test_autoreg_apply[AR:", "statsmodels/tsa/tests/test_ar.py::test_autoreg_apply_exception" ]
Python
[]
[]
statsmodels/statsmodels
8,047
statsmodels__statsmodels-8047
[ "8046" ]
b52f0b1e4aadaf21395db7c1c73d36311a11dc0a
diff --git a/statsmodels/nonparametric/smoothers_lowess.py b/statsmodels/nonparametric/smoothers_lowess.py --- a/statsmodels/nonparametric/smoothers_lowess.py +++ b/statsmodels/nonparametric/smoothers_lowess.py @@ -234,7 +234,7 @@ def lowess(endog, exog, frac=2.0/3.0, it=3, delta=0.0, xvals=None, is_sorted=Fal frac=frac, it=it-1, delta=delta, given_xvals=False) else: weights = np.ones_like(x) - xvalues = np.ascontiguousarray(xvalues) + xvalues = np.ascontiguousarray(xvalues, dtype=float) # Then run once more using those supplied weights at the points provided by xvals # No extra iterations are performed here since weights are fixed res, _ = _lowess(y, x, xvalues, weights,
diff --git a/statsmodels/nonparametric/tests/test_lowess.py b/statsmodels/nonparametric/tests/test_lowess.py --- a/statsmodels/nonparametric/tests/test_lowess.py +++ b/statsmodels/nonparametric/tests/test_lowess.py @@ -12,13 +12,14 @@ import os import numpy as np -import pytest from numpy.testing import ( - assert_almost_equal, assert_, - assert_raises, + assert_allclose, + assert_almost_equal, assert_equal, + assert_raises, ) +import pytest from statsmodels.nonparametric.smoothers_lowess import lowess @@ -227,7 +228,8 @@ def test_duplicate_xs(self): def test_spike(self): # see 7700 - # Create a curve that is easy to fit at first but gets harder further along. + # Create a curve that is easy to fit at first but gets + # harder further along. # This used to give an outlier bad fit at position 961 x = np.linspace(0, 10, 1001) y = np.cos(x ** 2 / 5) @@ -285,3 +287,11 @@ def test_returns_inputs(): x = np.arange(20) result = lowess(y, x, frac=0.4) assert_almost_equal(result, np.column_stack((x, y))) + + +def test_xvals_dtype(reset_randomstate): + y = [0] * 10 + [1] * 10 + x = np.arange(20) + # Previously raised ValueError: Buffer dtype mismatch + results_xvals = lowess(y, x, frac=0.4, xvals=x[:5]) + assert_allclose(results_xvals, np.zeros(5), atol=1e-12)
The xvals arg in smoothers_lowess gets ValueError #### Describe the bug xvals arg in statsmodels.nonparametric.smoothers_lowess.lowess gets ValueError The function works fine when xvals=None, but raises this ValueError on any xval value, regardless what data type is supplied. #### Code Sample, a copy-pastable example if possible ``` python3 Python 3.7.4 (default, Nov 2 2019, 13:31:25) [Clang 10.0.1 (clang-1001.0.46.4)] on darwin >>> import statsmodels >>> statsmodels.__version__ '0.13.1' >>> import numpy as np >>> from statsmodels.nonparametric.smoothers_lowess import lowess >>> np.ascontiguousarray(xvals) array([ 6, 24, 25, 35, 31, 37, 38, 38, 38, 38, 39]) >>> >>> ff = lowess(yy3, xx3, frac= .2, it=1, xvals=None) >>> >>> ff = lowess(yy3, xx3, frac= .2, it=1, xvals=yy3) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/shuzhao/.pyenv/versions/3.7.4/lib/python3.7/site-packages/statsmodels/nonparametric/smoothers_lowess.py", line 241, in lowess frac=frac, it=0, delta=delta, given_xvals=True) File "statsmodels/nonparametric/_smoothers_lowess.pyx", line 24, in statsmodels.nonparametric._smoothers_lowess.lowess ValueError: Buffer dtype mismatch, expected 'DTYPE_t' but got 'long' ```
Cast your array to floats.
"2022-01-27T16:11:34Z"
0.13dev
[ "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_options", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_import", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_delta_rdef", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_simple", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_iter_0_3", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_frac_2_3", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_duplicate_xs", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_range", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_exog_predict", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_delta_0", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_spike", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_delta_1", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_iter_0", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_frac_1_5", "statsmodels/nonparametric/tests/test_lowess.py::TestLowess::test_flat", "statsmodels/nonparametric/tests/test_lowess.py::test_returns_inputs" ]
[ "statsmodels/nonparametric/tests/test_lowess.py::test_xvals_dtype" ]
Python
[]
[]
statsmodels/statsmodels
8,093
statsmodels__statsmodels-8093
[ "8049" ]
2891644b53920fa2a1b451ff659ecfa90c2727c5
diff --git a/statsmodels/stats/proportion.py b/statsmodels/stats/proportion.py --- a/statsmodels/stats/proportion.py +++ b/statsmodels/stats/proportion.py @@ -1891,7 +1891,7 @@ def _std_2prop_power(diff, p2, ratio=1, alpha=0.05, value=0): p2_alt = p2 p1_alt = p2_alt + diff - std_null = _std_diff_prop(p1_vnull, p2_vnull) + std_null = _std_diff_prop(p1_vnull, p2_vnull, ratio=nobs_ratio) std_alt = _std_diff_prop(p1_alt, p2_alt, ratio=nobs_ratio) return p_pooled, std_null, std_alt @@ -1947,16 +1947,16 @@ def power_proportions_2indep(diff, prop2, nobs1, ratio=1, alpha=0.05, pooled proportion, used for std_null std_null standard error of difference under the null hypothesis (without - sqrt(nobs)) + sqrt(nobs1)) std_alt standard error of difference under the alternative hypothesis - (without sqrt(nobs)) + (without sqrt(nobs1)) """ # TODO: avoid possible circular import, check if needed from statsmodels.stats.power import normal_power_het - p_pooled, std_null, std_alt = _std_2prop_power(diff, prop2, ratio=1, - alpha=0.05, value=0) + p_pooled, std_null, std_alt = _std_2prop_power(diff, prop2, ratio=ratio, + alpha=alpha, value=value) pow_ = normal_power_het(diff, nobs1, alpha, std_null=std_null, std_alternative=std_alt,
diff --git a/statsmodels/stats/tests/test_proportion.py b/statsmodels/stats/tests/test_proportion.py --- a/statsmodels/stats/tests/test_proportion.py +++ b/statsmodels/stats/tests/test_proportion.py @@ -915,6 +915,16 @@ def test_power_2indep(): alternative='two-sided') assert_allclose(n2, n, rtol=1e-13) + # with nobs ratio != 1 + # note Stata has reversed ratio compared to ours, see #8049 + pwr_st = 0.7995659211532175 + n = 154 + res = power_proportions_2indep(-0.1, 0.2, n, ratio=2.) + assert_allclose(res.power, pwr_st, atol=1e-7) + + n2 = samplesize_proportions_2indep_onetail(-0.1, 0.2, pwr_st, ratio=2) + assert_allclose(n2, n, rtol=1e-4) + @pytest.mark.parametrize("count", np.arange(10, 90, 5)) @pytest.mark.parametrize("method", list(probci_methods.keys()) + ["binom_test"])
Bug in /statsmodels/stats/proportion.py? #### Describe the bug In the `_std_2prop_power` function, when [calculating](https://github.com/statsmodels/statsmodels/blob/b722afa1ceb3b90df33491d64dfc319f9ab89a4a/statsmodels/stats/proportion.py#L1894) `std_null`, the parameter `ratio` (sample size ratio, nobs2 = ratio * nobs1) is not correctly passed to `_std_diff_prop`, Instead, default value of one is used. This potential bug would impact both `samplesize_proportions_2indep_onetail` and `power_proportions_2indep`, when the two samples are unequally sized. #### Code Sample ```python from statsmodels.stats import proportion diff, p2, ratio = 0.1, 0.1, 2 p1 = p2 + diff p_pooled = (p1 + p2 * ratio) / (1 + ratio) p1_vnull, p2_vnull = p_pooled, p_pooled # the current behavior std_null_statsmodels = proportion._std_diff_prop(p1_vnull, p2_vnull) # the expected behavior std_null_correct = proportion._std_diff_prop(p1_vnull, p2_vnull, ratio = ratio) # their difference print([std_null_statsmodels, std_null_correct]) ``` <details> See formula (4) of this paper for technical details: Fleiss, Joseph L., Alex Tytun, and Hans K. Ury. A simple approximation for calculating sample sizes for comparing independent proportions. Biometrics (1980): 343-346. Basically, the current behavior incorrectly assumes `ratio = 1`, for the first term in the numerator. </details> If the issue has not been resolved, please file it in the issue tracker. #### Expected Output Change [this](https://github.com/statsmodels/statsmodels/blob/b722afa1ceb3b90df33491d64dfc319f9ab89a4a/statsmodels/stats/proportion.py#L1894) line to `std_null = _std_diff_prop(p1_vnull, p2_vnull, ratio=nobs_ratio)`
power_proportions_2indep doesn't propagate any keywords (although alpha and value are unused arguments in the function) `p_pooled, std_null, std_alt = _std_2prop_power(diff, prop2, ratio=1, alpha=0.05, value=0)` `std_null = _std_diff_prop(p1_vnull, p2_vnull)` I need to check the normalization/scaling here (nobs or nobs1) The variance in both samples are the same in this null case. (but the null could have a nonzero `value` Thanks for the quick reply. In the null case, `p1 = p2 = p_pooled`, so I agree that `p1(1 - p1) = p2(1 - p2) = p_pooled(1 - p_pooled)`. However, variance of the proportion estimate for sample 2 should have been be scaled by `ratio = nobs2 / nobs1`, which was not done. If you check the paper I referenced (in details above), or other sample size calculators such as this one (see the `r+1` term for $m^\prime$): https://www2.ccrb.cuhk.edu.hk/stat/proportion/Casagrande.htm You'll see what I am talking about. some more info: nonzero `value` is not supported, Would be better to raise ValueError if value != 0, or implement it? (AFAIR, other packages don't allow nonzero value. and we don't have power for tost_proportions_2indep) ``` value : float currently only `value=0`, i.e. equality testing, is supported ``` ``` std_null standard error of difference under the null hypothesis (without sqrt(nobs)) std_alt standard error of difference under the alternative hypothesis (without sqrt(nobs)) ``` AFAICS, this should be sqrt(nobs1) instead of sqrt(nobs) i.e. AFAIR everything is normalized by `nobs1` which is also the target of the sample size computation all unit tests in `test_power_2indep` (against R package ? 
and Stata use ratio=1) summ: 2 bugs - power_proportions_2indep does not propagate ratio to _std_2prop_power, and - _std_2prop_power does not use ratio in std_null as pointed out in the first message and clarify docstring for std @jiannanlu Thanks for finding and reporting the bug related aside: #6675 original merged PR I don't see anything about using the power_proportions_2indep function with the power classes to solve for nobs1 and similar. Solving for nobs-`ratio`, requires that it is properly propagated during rootfinding. Finding effect_size is also "non-standard". We need to make sure std_alt adjusts as we change the alt value (diff) or effect size. brief search and skimming It looks like they have the one-tail sample size computation with a non-zero null "value" (either diff or risk ratio) (I didn't read their discussion for the estimator, e.g. Miettinen and Nurminen MLE under Null diff restriction.) Farrington, Conor P., and Godfrey Manning. "Test statistics and sample size formulae for comparative binomial trials with null hypothesis of non‐zero risk difference or non‐unity relative risk." Statistics in medicine 9, no. 12 (1990): 1447-1454. see also https://github.com/statsmodels/statsmodels/issues/4828#issuecomment-592985459 @bashtage This needs to go into the next bugfix release I can prepare it later today or tomorrow Please get it in. It looks like aside from a release note 0.13.2 is ready.
"2022-02-01T21:01:16Z"
0.13dev
[ "statsmodels/stats/tests/test_proportion.py::test_proportion_effect_size", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[beta]", "statsmodels/stats/tests/test_proportion.py::test_confint_multinomial_proportions_zeros", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[wilson]", "statsmodels/stats/tests/test_proportion.py::test_multinomial_proportions_errors", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[normal]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[jeffreys]", "statsmodels/stats/tests/test_proportion.py::test_confint_multinomial_proportions", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[agresti_coull]", "statsmodels/stats/tests/test_proportion.py::test_samplesize_confidenceinterval_prop", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count11-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count20-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count43-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count40-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count24-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count37-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count19-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count34-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count13]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count1-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count36-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count9-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count32-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count35-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count8-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count44-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count7-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count2-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count46-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count14]", "statsmodels/stats/tests/test_proportion.py::test_binom_tost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count26-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count29-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count1]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count32-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count4]", "statsmodels/stats/tests/test_proportion.py::test_score_confint_koopman_nam", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count11-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count27-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count10-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count9-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count24-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count22-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count46-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count5-47]", "statsmodels/stats/tests/test_proportion.py::test_test_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count3-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count30-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count15]", "statsmodels/stats/tests/test_proportion.py::test_confint_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count3-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count31-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count10-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count25-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count44-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count12-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count13-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count33-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count16-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count29-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count37-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count13]", "statsmodels/stats/tests/test_proportion.py::test_binom_rejection_interval", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count14-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count42-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count38-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count1-47]", "statsmodels/stats/tests/test_proportion.py::test_power_ztost_prop_norm", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count0-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count25-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count20-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count35-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count10]", "statsmodels/stats/tests/test_proportion.py::test_ztost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count2-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count42-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count4]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count12-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count16-50]", "statsmodels/stats/tests/test_proportion.py::test_equivalence_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count6-50]", "statsmodels/stats/tests/test_proportion.py::test_confint_2indep_propcis", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count33-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count30-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count26-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count5-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count21-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count6-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count15]", "statsmodels/stats/tests/test_proportion.py::test_proportion_ztests", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count18-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count15-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count47-50]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count39-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count38-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count18-50]", "statsmodels/stats/tests/test_proportion.py::test_power_ztost_prop", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count31-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count0]", "statsmodels/stats/tests/test_proportion.py::test_score_test_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count45-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count23-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count0-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count7-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count34-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count12]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count21-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count28-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count47-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count4-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count41-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count27-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count17-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count36-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count23-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count43-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count19-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count6]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count13-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count28-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[normal-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count0]", "statsmodels/stats/tests/test_proportion.py::test_power_binom_tost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[wilson-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count14-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count14]", "statsmodels/stats/tests/test_proportion.py::test_int_check", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count17-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count15-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count4-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count40-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[binom_test-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count8-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count39-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[jeffreys-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[agresti_coull-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count41-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[beta-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count22-50]", "statsmodels/stats/tests/test_proportion.py::test_binom_test", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[count45-47]", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_scalar", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_number_pairs_1493", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_pairwiseproptest", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_proptest", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_default_values" ]
[ "statsmodels/stats/tests/test_proportion.py::test_power_2indep" ]
Python
[]
[]
statsmodels/statsmodels
8,134
statsmodels__statsmodels-8134
[ "8133" ]
63b4bd4cae3fcdd3b230e49bba09fb03111afe49
diff --git a/statsmodels/iolib/summary.py b/statsmodels/iolib/summary.py --- a/statsmodels/iolib/summary.py +++ b/statsmodels/iolib/summary.py @@ -764,6 +764,10 @@ def _repr_html_(self): '''Display as HTML in IPython notebook.''' return self.as_html() + def _repr_latex_(self): + '''Display as LaTeX when converting IPython notebook to PDF.''' + return self.as_latex() + def add_table_2cols(self, res, title=None, gleft=None, gright=None, yname=None, xname=None): """ diff --git a/statsmodels/iolib/summary2.py b/statsmodels/iolib/summary2.py --- a/statsmodels/iolib/summary2.py +++ b/statsmodels/iolib/summary2.py @@ -30,6 +30,10 @@ def _repr_html_(self): """Display as HTML in IPython notebook.""" return self.as_html() + def _repr_latex_(self): + '''Display as LaTeX when converting IPython notebook to PDF.''' + return self.as_latex() + def add_df(self, df, index=True, header=True, float_format='%.4f', align='r'): """ diff --git a/statsmodels/iolib/table.py b/statsmodels/iolib/table.py --- a/statsmodels/iolib/table.py +++ b/statsmodels/iolib/table.py @@ -221,6 +221,9 @@ def __repr__(self): def _repr_html_(self, **fmt_dict): return self.as_html(**fmt_dict) + def _repr_latex_(self, center=True, **fmt_dict): + return self.as_latex_tabular(center, **fmt_dict) + def _add_headers_stubs(self, headers, stubs): """Return None. Adds headers and stubs to table, if these were provided at initialization.
diff --git a/statsmodels/iolib/tests/test_summary.py b/statsmodels/iolib/tests/test_summary.py --- a/statsmodels/iolib/tests/test_summary.py +++ b/statsmodels/iolib/tests/test_summary.py @@ -5,8 +5,10 @@ import numpy as np # noqa: F401 import pytest +from numpy.testing import assert_equal from statsmodels.datasets import macrodata +from statsmodels.tools.tools import add_constant from statsmodels.regression.linear_model import OLS @@ -31,6 +33,30 @@ def test_wrong_len_xname(reset_randomstate): res.summary(xname=['x1', 'x2', 'x3']) +class TestSummaryLatex(object): + def test__repr_latex_(self): + desired = r''' +\begin{center} +\begin{tabular}{lcccccc} +\toprule + & \textbf{coef} & \textbf{std err} & \textbf{t} & \textbf{P$> |$t$|$} & \textbf{[0.025} & \textbf{0.975]} \\ +\midrule +\textbf{const} & 7.2248 & 0.866 & 8.346 & 0.000 & 5.406 & 9.044 \\ +\textbf{x1} & -0.6609 & 0.177 & -3.736 & 0.002 & -1.033 & -0.289 \\ +\bottomrule +\end{tabular} +\end{center} +''' + x = [1, 5, 7, 3, 5, 5, 8, 3, 3, 4, 6, 4, 2, 7, 4, 2, 1, 9, 2, 6] + x = add_constant(x) + y = [6, 4, 2, 7, 4, 2, 1, 9, 2, 6, 1, 5, 7, 3, 5, 5, 8, 3, 3, 4] + reg = OLS(y, x).fit() + + actual = reg.summary().tables[1]._repr_latex_() + actual = '\n%s\n' % actual + assert_equal(actual, desired) + + if __name__ == '__main__': from statsmodels.regression.tests.test_regression import TestOLS diff --git a/statsmodels/iolib/tests/test_summary2.py b/statsmodels/iolib/tests/test_summary2.py --- a/statsmodels/iolib/tests/test_summary2.py +++ b/statsmodels/iolib/tests/test_summary2.py @@ -115,6 +115,38 @@ def test_summary_col_ordering_preserved(self): if line.startswith(variable): assert line in str(actual) + def test__repr_latex_(self): + desired = r''' +\begin{table} +\caption{} +\label{} +\begin{center} +\begin{tabular}{lll} +\hline + & y I & y II \\ +\hline +const & 7.7500 & 12.4231 \\ + & (1.1058) & (3.1872) \\ +x1 & -0.7500 & -1.5769 \\ + & (0.2368) & (0.6826) \\ +R-squared & 0.7697 & 0.6401 \\ +R-squared Adj. 
& 0.6930 & 0.5202 \\ +\hline +\end{tabular} +\end{center} +\end{table} +''' + x = [1, 5, 7, 3, 5] + x = add_constant(x) + y1 = [6, 4, 2, 7, 4] + y2 = [8, 5, 0, 12, 4] + reg1 = OLS(y1, x).fit() + reg2 = OLS(y2, x).fit() + + actual = summary_col([reg1, reg2])._repr_latex_() + actual = '\n%s\n' % actual + assert_equal(actual, desired) + def test_OLSsummary(self): # Test that latex output of regular OLS output still contains # multiple tables diff --git a/statsmodels/iolib/tests/test_table.py b/statsmodels/iolib/tests/test_table.py --- a/statsmodels/iolib/tests/test_table.py +++ b/statsmodels/iolib/tests/test_table.py @@ -241,3 +241,24 @@ def test_default_alignment(self): txt_fmt=default_txt_fmt) actual = '\n%s\n' % actual.as_text() assert_equal(desired, str(actual)) + + def test__repr_latex(self): + desired = r""" +\begin{center} +\begin{tabular}{lcc} +\toprule + & \textbf{header1} & \textbf{header2} \\ +\midrule +\textbf{stub1} & 5.394 & 29.3 \\ +\textbf{stub2} & 343 & 34.2 \\ +\bottomrule +\end{tabular} +\end{center} +""" + testdata = [[5.394, 29.3], [343, 34.2]] + teststubs = ('stub1', 'stub2') + testheader = ('header1', 'header2') + tbl = SimpleTable(testdata, testheader, teststubs, + txt_fmt=default_txt_fmt) + actual = '\n%s\n' % tbl._repr_latex_() + assert_equal(actual, desired)
Displaying summary tables in LaTeX when converting Jupyter Notebooks to PDF via nbconvert #### Is your feature request related to a problem? Please describe There's currently no built-in method to display tables in LaTeX when converting Jupyter Notebooks to PDF via nbconvert. Output is a string in the converted PDF. #### Describe the solution you'd like Including a method along the lines of: ``` def _repr_latex_(self): return self.as_latex() ``` somewhere in the summary table scripts would mean the tables could be displayed as both HTML directly in the Jupyter notebook and LaTeX in the converted PDF. #### Describe alternatives you have considered I can manually patch my Jupyter notebook to produce a centred LaTeX summary table in my output PDF file: ``` def _repr_latex_(self): return f"\\begin{{center}}\n{self.as_latex()}\n\\end{{center}}" statsmodels.iolib.summary.Summary._repr_latex_ = _repr_latex_ ``` however, it would be nice to have this feature built directly into statsmodels. :) If I have time I'll try making a PR myself, but I've never contributed to statsmodels before, so it might take me a bit of time to ensure that I'm doing everything correctly.
"2022-02-18T13:21:50Z"
0.13dev
[ "statsmodels/iolib/tests/test_summary.py::test_wrong_len_xname", "statsmodels/iolib/tests/test_summary.py::test_escaped_variable_name", "statsmodels/iolib/tests/test_summary2.py::test_summary_col_r2", "statsmodels/iolib/tests/test_summary2.py::test_ols_summary_rsquared_label", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_summarycol_float_format", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_OLSsummary", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_summary_col_ordering_preserved", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_summarycol", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_summarycol_drop_omitted", "statsmodels/iolib/tests/test_table.py::TestSimpleTable::test_simple_table_4", "statsmodels/iolib/tests/test_table.py::TestSimpleTable::test_regression_with_tuples", "statsmodels/iolib/tests/test_table.py::TestSimpleTable::test_simple_table_special_chars", "statsmodels/iolib/tests/test_table.py::TestSimpleTable::test_simple_table_1", "statsmodels/iolib/tests/test_table.py::TestSimpleTable::test_default_alignment", "statsmodels/iolib/tests/test_table.py::TestSimpleTable::test_simple_table_2", "statsmodels/iolib/tests/test_table.py::TestSimpleTable::test_simple_table_3" ]
[ "statsmodels/iolib/tests/test_summary.py::TestSummaryLatex::test__repr_latex_", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test__repr_latex_", "statsmodels/iolib/tests/test_table.py::TestSimpleTable::test__repr_latex" ]
Python
[]
[]
statsmodels/statsmodels
8,137
statsmodels__statsmodels-8137
[ "8135" ]
59a553e8102257a1f6c7bcd42e5fd1abd94c1c51
diff --git a/statsmodels/stats/rates.py b/statsmodels/stats/rates.py --- a/statsmodels/stats/rates.py +++ b/statsmodels/stats/rates.py @@ -7,6 +7,8 @@ import numpy as np +import warnings + from scipy import stats from statsmodels.stats.base import HolderTuple @@ -57,7 +59,7 @@ def test_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1, - 'smaller' : H1: ratio of rates is smaller than ratio_null etest_kwds: dictionary Additional parameters to be passed to the etest_poisson_2indep - function, namely ygrid. + function, namely y_grid. Returns ------- @@ -150,7 +152,8 @@ def test_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1, def etest_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1, - method='score', alternative='2-sided', ygrid=None): + method='score', alternative='2-sided', ygrid=None, + y_grid=None): """E-test for ratio of two sample Poisson rates If the two Poisson rates are g1 and g2, then the Null hypothesis is @@ -184,11 +187,15 @@ def etest_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=1, 'larger' : H1: ratio of rates is larger than ratio_null 'smaller' : H1: ratio of rates is smaller than ratio_null - ygrid : None or 1-D ndarray + y_grid : None or 1-D ndarray Grid values for counts of the Poisson distribution used for computing the pvalue. By default truncation is based on an upper tail Poisson quantiles. + ygrid : None or 1-D ndarray + Same as y_grid. Deprecated. If both y_grid and ygrid are provided, + ygrid will be ignored. 
+ Returns ------- stat_sample : float @@ -237,15 +244,23 @@ def stat_func(x1, x2): stat_sample = stat_func(y1, y2) + if ygrid is not None: + warnings.warn("ygrid is deprecated, use y_grid", DeprecationWarning) + y_grid = y_grid if y_grid is not None else ygrid + # The following uses a fixed truncation for evaluating the probabilities # It will currently only work for small counts, so that sf at truncation # point is small # We can make it depend on the amount of truncated sf. # Some numerical optimization or checks for large means need to be added. - if ygrid is None: + if y_grid is None: threshold = stats.poisson.isf(1e-13, max(mean1, mean2)) threshold = max(threshold, 100) # keep at least 100 y_grid = np.arange(threshold + 1) + else: + y_grid = np.asarray(y_grid) + if y_grid.ndim != 1: + raise ValueError("y_grid needs to be None or 1-dimensional array") pdf1 = stats.poisson.pmf(y_grid, mean1) pdf2 = stats.poisson.pmf(y_grid, mean2)
diff --git a/statsmodels/stats/tests/test_rates_poisson.py b/statsmodels/stats/tests/test_rates_poisson.py --- a/statsmodels/stats/tests/test_rates_poisson.py +++ b/statsmodels/stats/tests/test_rates_poisson.py @@ -1,6 +1,8 @@ import pytest +import warnings +from numpy import arange from numpy.testing import assert_allclose, assert_equal # we cannot import test_poisson_2indep directly, pytest treats that as test @@ -245,3 +247,31 @@ def test_alternative(case): _, pv = smr.test_poisson_2indep(count1, n1, count2, n2, method=meth, ratio_null=1.2, alternative=alt) assert_allclose(pv, cases_alt[case], rtol=1e-13) + + +def test_y_grid_regression(): + y_grid = arange(1000) + + _, pv = etest_poisson_2indep(60, 51477.5, 30, 54308.7, y_grid=y_grid) + assert_allclose(pv, 0.000567261758250953, atol=1e-15) + + _, pv = etest_poisson_2indep(41, 28010, 15, 19017, y_grid=y_grid) + assert_allclose(pv, 0.03782053187021494, atol=1e-15) + + _, pv = etest_poisson_2indep(1, 1, 1, 1, y_grid=[1]) + assert_allclose(pv, 0.1353352832366127, atol=1e-15) + + +def test_invalid_y_grid(): + # check ygrid deprecation + warnings.simplefilter("always") + with warnings.catch_warnings(record=True) as w: + etest_poisson_2indep(1, 1, 1, 1, ygrid=[1]) + assert len(w) == 1 + assert issubclass(w[0].category, DeprecationWarning) + assert "ygrid" in str(w[0].message) + + # check y_grid validity + with pytest.raises(ValueError) as e: + etest_poisson_2indep(1, 1, 1, 1, y_grid=1) + assert "y_grid" in str(e.value)
BUG: etest_poisson_2indep() not allowing custom ygrid [The documentation](https://www.statsmodels.org/dev/generated/statsmodels.stats.rates.etest_poisson_2indep.html#statsmodels.stats.rates.etest_poisson_2indep) states that `ygrid` is a parameter we can supply to `etest_poisson_2indep`, but this is broken at the moment: ```python from statsmodels.stats.rates import etest_poisson_2indep etest_poisson_2indep(1, 1, 1, 1, ygrid=1) ``` Output: ```bash Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/voditas/.local/lib/python3.7/site-packages/statsmodels/stats/rates.py", line 249, in etest_poisson_2indep pdf1 = stats.poisson.pmf(y_grid, mean1) UnboundLocalError: local variable 'y_grid' referenced before assignment ``` This happens in version 0.13.2 (but probably others too) for any value of `ygrid` that is not `None` and is because of the likely accidental usage of `y_grid` instead of `ygrid` in the function: ```python if ygrid is None: # uses ygrid, like the API threshold = stats.poisson.isf(1e-13, max(mean1, mean2)) threshold = max(threshold, 100) # keep at least 100 y_grid = np.arange(threshold + 1) # changes to y_grid pdf1 = stats.poisson.pmf(y_grid, mean1) # if ygrid was set, an error will be thrown pdf2 = stats.poisson.pmf(y_grid, mean2) ``` The fix is easy - replace usages of `y_grid` with `ygrid`, as in the API. I will create a PR with this fix.
thanks for catching this you could just do y_grid = ygrid in a new `else` part. ``` if ygrid is None: ... else: <needs some checking and asarray> y_grid = ... ygrid ``` I thought of doing that just to make less code changes, but I think it's confusing to have both `ygrid` and `y_grid`. There is no reason to make a distinction, is there? It's taste question when there is no convention yet, and I'm changing my pattern every once in a while. Our code convention has some preference for adding underlines, but not strict in borderline cases. I like `y_grid` Recently I added a `y_values` option to count model predict (which doesn't need to be a grid). So, I guess now I prefer to change the keyword in the function itself to `y_grid`, i.e. add y_grid and deprecate `ygrid`. Or for now, at least we should make a renaming y_grid to ygrid
"2022-02-18T17:20:02Z"
0.13dev
[ "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case20]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case7]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case2]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case11]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case16]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case9]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case4]", "statsmodels/stats/tests/test_rates_poisson.py::test_tost_poisson", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case0]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case3]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case19]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case5]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case17]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case15]", "statsmodels/stats/tests/test_rates_poisson.py::test_twosample_poisson_r", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case8]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case6]", "statsmodels/stats/tests/test_rates_poisson.py::test_twosample_poisson", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case13]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case14]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case12]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case18]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case1]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case10]" ]
[ "statsmodels/stats/tests/test_rates_poisson.py::test_invalid_y_grid", "statsmodels/stats/tests/test_rates_poisson.py::test_y_grid_regression" ]
Python
[]
[]
statsmodels/statsmodels
8,154
statsmodels__statsmodels-8154
[ "7985" ]
7f65348146b1d9052548a0d1690ca4ead86e521e
diff --git a/statsmodels/stats/proportion.py b/statsmodels/stats/proportion.py --- a/statsmodels/stats/proportion.py +++ b/statsmodels/stats/proportion.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -"""Tests and Confidence Intervals for Binomial Proportions +""" +Tests and Confidence Intervals for Binomial Proportions Created on Fri Mar 01 00:23:07 2013 @@ -8,48 +9,135 @@ """ from statsmodels.compat.python import lzip - +from typing import Callable, Tuple import numpy as np +import pandas as pd from scipy import optimize, stats from statsmodels.stats.base import AllPairsResults, HolderTuple from statsmodels.stats.weightstats import _zstat_generic2 from statsmodels.tools.sm_exceptions import HypothesisTestWarning from statsmodels.tools.testing import Holder +from statsmodels.tools.validation import array_like FLOAT_INFO = np.finfo(float) -def proportion_confint(count, nobs, alpha=0.05, method='normal'): +def _bound_proportion_confint( + func: Callable[[float], float], qi: float, lower: bool = True +) -> float: + """ + Try hard to find a bound different from eps/1 - eps in proportion_confint + + Parameters + ---------- + func : callable + Callable function to use as the objective of the search + qi : float + The empirical success rate + lower : bool + Whether to fund a lower bound for the left side of the CI + + Returns + ------- + float + The coarse bound + """ + default = FLOAT_INFO.eps if lower else 1.0 - FLOAT_INFO.eps + + def step(v): + return v / 8 if lower else v + (1.0 - v) / 8 + + x = step(qi) + w = func(x) + cnt = 1 + while w > 0 and cnt < 10: + x = step(x) + w = func(x) + cnt += 1 + return x if cnt < 10 else default + + +def _bisection_search_conservative( + func: Callable[[float], float], lb: float, ub: float, steps: int = 27 +) -> Tuple[float, float]: + """ + Private function used as a fallback by proportion_confint + + Used when brentq returns a non-conservative bound for the CI + + Parameters + ---------- + func : callable + Callable function to use as 
the objective of the search + lb : float + Lower bound + ub : float + Upper bound + steps : int + Number of steps to use in the bisection + + Returns + ------- + est : float + The estimated value. Will always produce a negative value of func + func_val : float + The value of the function at the estimate + """ + upper = func(ub) + lower = func(lb) + best = upper if upper < 0 else lower + best_pt = ub if upper < 0 else lb + if np.sign(lower) == np.sign(upper): + raise ValueError("problem with signs") + mp = (ub + lb) / 2 + mid = func(mp) + if (mid < 0) and (mid > best): + best = mid + best_pt = mp + for _ in range(steps): + if np.sign(mid) == np.sign(upper): + ub = mp + upper = mid + else: + lb = mp + mp = (ub + lb) / 2 + mid = func(mp) + if (mid < 0) and (mid > best): + best = mid + best_pt = mp + return best_pt, best + + +def proportion_confint(count, nobs, alpha=0.05, method="normal"): """ Confidence interval for a binomial proportion Parameters ---------- - count : int or array_array_like - number of successes, can be pandas Series or DataFrame - nobs : int - total number of trials + count : {int, array_like} + number of successes, can be pandas Series or DataFrame. Arrays + much contain integer values. + nobs : {int, array_like} + total number of trials. Arrays much contain integer values. alpha : float in (0, 1) significance level, default 0.05 - method : {'normal', 'agresti_coull', 'beta', 'wilson', 'binom_test'} - default: 'normal' - method to use for confidence interval, - currently available methods : + method : {"normal", "agresti_coull", "beta", "wilson", "binom_test"} + default: "normal" + method to use for confidence interval. 
Supported methods: - `normal` : asymptotic normal approximation - `agresti_coull` : Agresti-Coull interval - `beta` : Clopper-Pearson interval based on Beta distribution - `wilson` : Wilson Score interval - `jeffreys` : Jeffreys Bayesian Interval - - `binom_test` : experimental, inversion of binom_test + - `binom_test` : Numerical inversion of binom_test Returns ------- - ci_low, ci_upp : float, ndarray, or pandas Series or DataFrame + ci_low, ci_upp : {float, ndarray, Series DataFrame} lower and upper confidence level with coverage (approximately) 1-alpha. - When a pandas object is returned, then the index is taken from the - `count`. + When a pandas object is returned, then the index is taken from `count`. Notes ----- @@ -57,13 +145,13 @@ def proportion_confint(count, nobs, alpha=0.05, method='normal'): but is in general conservative. Most of the other methods have average coverage equal to 1-alpha, but will have smaller coverage in some cases. - The 'beta' and 'jeffreys' interval are central, they use alpha/2 in each + The "beta" and "jeffreys" interval are central, they use alpha/2 in each tail, and alpha is not adjusted at the boundaries. In the extreme case when `count` is zero or equal to `nobs`, then the coverage will be only - 1 - alpha/2 in the case of 'beta'. + 1 - alpha/2 in the case of "beta". The confidence intervals are clipped to be in the [0, 1] interval in the - case of 'normal' and 'agresti_coull'. + case of "normal" and "agresti_coull". Method "binom_test" directly inverts the binomial test in scipy.stats. which has discrete steps. @@ -79,11 +167,10 @@ def proportion_confint(count, nobs, alpha=0.05, method='normal'): Estimation for a Binomial Proportion", Statistical Science 16 (2): 101–133. doi:10.1214/ss/1009213286. 
""" - - pd_index = getattr(count, 'index', None) - if pd_index is not None and callable(pd_index): - # this rules out lists, lists have an index method - pd_index = None + is_scalar = np.isscalar(count) and np.isscalar(nobs) + is_pandas = isinstance(count, (pd.Series, pd.DataFrame)) + count_a = array_like(count, "count", optional=False, ndim=None) + nobs_a = array_like(nobs, "nobs", optional=False, ndim=None) def _check(x: np.ndarray, name: str) -> np.ndarray: if np.issubdtype(x.dtype, np.integer): @@ -96,126 +183,131 @@ def _check(x: np.ndarray, name: str) -> np.ndarray: ) return y - count = _check(np.asarray(count), "count") - nobs = _check(np.asarray(nobs), "nobs") + count_a = _check(np.asarray(count_a), "count") + nobs_a = _check(np.asarray(nobs_a), "count") - q_ = count / nobs + q_ = count_a / nobs_a alpha_2 = 0.5 * alpha - if method == 'normal': - std_ = np.sqrt(q_ * (1 - q_) / nobs) - dist = stats.norm.isf(alpha / 2.) * std_ + if method == "normal": + std_ = np.sqrt(q_ * (1 - q_) / nobs_a) + dist = stats.norm.isf(alpha / 2.0) * std_ ci_low = q_ - dist ci_upp = q_ + dist - - - - elif method == 'binom_test': + elif method == "binom_test": # inverting the binomial test - if hasattr(stats, "binomtest"): - def func(qi): - return stats.binomtest(count, nobs, p=qi).pvalue - alpha - else: - # Remove after min SciPy >= 1.7 - def func(qi): - return stats.binom_test(count, nobs, p=qi) - alpha + def func_factory(count: int, nobs: int) -> Callable[[float], float]: + if hasattr(stats, "binomtest"): - def _bound(qi, lower=True): - """ - Try hard to find a bound different from eps or 1 - eps - - Parameters - ---------- - qi : float - The empirical success rate - lower : bool - Whether to fund a lower bound for the left side of the CI - - Returns - ------- - float - The coarse bound - """ - default = FLOAT_INFO.eps if lower else 1.0 - FLOAT_INFO.eps - - def step(v): - return v / 8 if lower else v + (1.0 - v) / 8 - - x = step(qi) - w = func(x) - cnt = 1 - while w > 0 and cnt 
< 10: - x = step(x) - w = func(x) - cnt += 1 - return x if cnt < 10 else default - - if count == 0: - ci_low = 0 - else: - lower_bnd = _bound(q_, lower=True) - ci_low = optimize.brentq(func, lower_bnd, q_) - if count == nobs: - ci_upp = 1 - else: - upper_bnd = _bound(q_, lower=False) - ci_upp = optimize.brentq(func, q_, upper_bnd) + def func(qi): + return stats.binomtest(count, nobs, p=qi).pvalue - alpha - elif method == 'beta': - ci_low = stats.beta.ppf(alpha_2, count, nobs - count + 1) - ci_upp = stats.beta.isf(alpha_2, count + 1, nobs - count) + else: + # Remove after min SciPy >= 1.7 + def func(qi): + return stats.binom_test(count, nobs, p=qi) - alpha + + return func + + bcast = np.broadcast(count_a, nobs_a) + ci_low = np.zeros(bcast.shape) + ci_upp = np.zeros(bcast.shape) + index = bcast.index + for c, n in bcast: + # Enforce symmetry + reverse = False + _q = q_.flat[index] + if c > n // 2: + c = n - c + reverse = True + _q = 1 - _q + func = func_factory(c, n) + if c == 0: + ci_low.flat[index] = 0.0 + else: + lower_bnd = _bound_proportion_confint(func, _q, lower=True) + val, _z = optimize.brentq( + func, lower_bnd, _q, full_output=True + ) + if func(val) > 0: + power = 10 + new_lb = val - (val - lower_bnd) / 2**power + while func(new_lb) > 0 and power >= 0: + power -= 1 + new_lb = val - (val - lower_bnd) / 2**power + val, _ = _bisection_search_conservative(func, new_lb, _q) + ci_low.flat[index] = val + if c == n: + ci_upp.flat[index] = 1.0 + else: + upper_bnd = _bound_proportion_confint(func, _q, lower=False) + val, _z = optimize.brentq( + func, _q, upper_bnd, full_output=True + ) + if func(val) > 0: + power = 10 + new_ub = val + (upper_bnd - val) / 2**power + while func(new_ub) > 0 and power >= 0: + power -= 1 + new_ub = val - (upper_bnd - val) / 2**power + val, _ = _bisection_search_conservative(func, _q, new_ub) + ci_upp.flat[index] = val + if reverse: + temp = ci_upp.flat[index] + ci_upp.flat[index] = 1 - ci_low.flat[index] + ci_low.flat[index] = 1 - temp 
+ index = bcast.index + elif method == "beta": + ci_low = stats.beta.ppf(alpha_2, count_a, nobs_a - count_a + 1) + ci_upp = stats.beta.isf(alpha_2, count_a + 1, nobs_a - count_a) if np.ndim(ci_low) > 0: - ci_low[q_ == 0] = 0 - ci_upp[q_ == 1] = 1 + ci_low.flat[q_.flat == 0] = 0 + ci_upp.flat[q_.flat == 1] = 1 else: - ci_low = ci_low if (q_ != 0) else 0 - ci_upp = ci_upp if (q_ != 1) else 1 - - elif method == 'agresti_coull': - crit = stats.norm.isf(alpha / 2.) - nobs_c = nobs + crit**2 - q_c = (count + crit**2 / 2.) / nobs_c - std_c = np.sqrt(q_c * (1. - q_c) / nobs_c) + ci_low = 0 if q_ == 0 else ci_low + ci_upp = 1 if q_ == 1 else ci_upp + elif method == "agresti_coull": + crit = stats.norm.isf(alpha / 2.0) + nobs_c = nobs_a + crit**2 + q_c = (count_a + crit**2 / 2.0) / nobs_c + std_c = np.sqrt(q_c * (1.0 - q_c) / nobs_c) dist = crit * std_c ci_low = q_c - dist ci_upp = q_c + dist - - elif method == 'wilson': - crit = stats.norm.isf(alpha / 2.) + elif method == "wilson": + crit = stats.norm.isf(alpha / 2.0) crit2 = crit**2 - denom = 1 + crit2 / nobs - center = (q_ + crit2 / (2 * nobs)) / denom - dist = crit * np.sqrt(q_ * (1. - q_) / nobs + crit2 / (4. 
* nobs**2)) + denom = 1 + crit2 / nobs_a + center = (q_ + crit2 / (2 * nobs_a)) / denom + dist = crit * np.sqrt( + q_ * (1.0 - q_) / nobs_a + crit2 / (4.0 * nobs_a**2) + ) dist /= denom ci_low = center - dist ci_upp = center + dist - # method adjusted to be more forgiving of misspellings or incorrect option name - elif method[:4] == 'jeff': - ci_low, ci_upp = stats.beta.interval(1 - alpha, count + 0.5, - nobs - count + 0.5) - + elif method[:4] == "jeff": + ci_low, ci_upp = stats.beta.interval( + 1 - alpha, count_a + 0.5, nobs_a - count_a + 0.5 + ) else: - raise NotImplementedError('method "%s" is not available' % method) - - if method in ['normal', 'agresti_coull']: + raise NotImplementedError(f"method {method} is not available") + if method in ["normal", "agresti_coull"]: ci_low = np.clip(ci_low, 0, 1) ci_upp = np.clip(ci_upp, 0, 1) - if pd_index is not None and np.ndim(ci_low) > 0: - import pandas as pd - if np.ndim(ci_low) == 1: - ci_low = pd.Series(ci_low, index=pd_index) - ci_upp = pd.Series(ci_upp, index=pd_index) - if np.ndim(ci_low) == 2: - ci_low = pd.DataFrame(ci_low, index=pd_index) - ci_upp = pd.DataFrame(ci_upp, index=pd_index) - + if is_pandas: + container = pd.Series if isinstance(count, pd.Series) else pd.DataFrame + ci_low = container(ci_low, index=count.index) + ci_upp = container(ci_upp, index=count.index) + if is_scalar: + return float(ci_low), float(ci_upp) return ci_low, ci_upp def multinomial_proportions_confint(counts, alpha=0.05, method='goodman'): - '''Confidence intervals for multinomial proportions. + """ + Confidence intervals for multinomial proportions. Parameters ---------- @@ -297,7 +389,7 @@ def multinomial_proportions_confint(counts, alpha=0.05, method='goodman'): simultaneous confidence intervals for multinomial proportions for small counts in a large number of cells," Journal of Statistical Software, Vol. 5, No. 6, 2000, pp. 1-24. 
- ''' + """ if alpha <= 0 or alpha >= 1: raise ValueError('alpha must be in (0, 1), bounds excluded') counts = np.array(counts, dtype=float) @@ -316,24 +408,30 @@ def multinomial_proportions_confint(counts, alpha=0.05, method='goodman'): elif method[:5] == 'sison': # We accept any name starting with 'sison' # Define a few functions we'll use a lot. def poisson_interval(interval, p): - """Compute P(b <= Z <= a) where Z ~ Poisson(p) and - `interval = (b, a)`.""" + """ + Compute P(b <= Z <= a) where Z ~ Poisson(p) and + `interval = (b, a)`. + """ b, a = interval prob = stats.poisson.cdf(a, p) - stats.poisson.cdf(b - 1, p) return prob def truncated_poisson_factorial_moment(interval, r, p): - """Compute mu_r, the r-th factorial moment of a poisson random - variable of parameter `p` truncated to `interval = (b, a)`.""" + """ + Compute mu_r, the r-th factorial moment of a poisson random + variable of parameter `p` truncated to `interval = (b, a)`. + """ b, a = interval return p ** r * (1 - ((poisson_interval((a - r + 1, a), p) - poisson_interval((b - r, b - 1), p)) / poisson_interval((b, a), p))) def edgeworth(intervals): - """Compute the Edgeworth expansion term of Sison & Glaz's formula + """ + Compute the Edgeworth expansion term of Sison & Glaz's formula (1) (approximated probability for multinomial proportions in a - given box).""" + given box). + """ # Compute means and central moments of the truncated poisson # variables. mu_r1, mu_r2, mu_r3, mu_r4 = [ @@ -363,8 +461,10 @@ def edgeworth(intervals): def approximated_multinomial_interval(intervals): - """Compute approximated probability for Multinomial(n, proportions) - to be in `intervals` (Sison & Glaz's formula (1)).""" + """ + Compute approximated probability for Multinomial(n, proportions) + to be in `intervals` (Sison & Glaz's formula (1)). 
+ """ return np.exp( np.sum(np.log([poisson_interval(interval, p) for (interval, p) in zip(intervals, counts)])) + @@ -373,8 +473,10 @@ def approximated_multinomial_interval(intervals): ) def nu(c): - """Compute interval coverage for a given `c` (Sison & Glaz's - formula (7)).""" + """ + Compute interval coverage for a given `c` (Sison & Glaz's + formula (7)). + """ return approximated_multinomial_interval( [(np.maximum(count - c, 0), np.minimum(count + c, n)) for count in counts]) @@ -404,7 +506,8 @@ def nu(c): def samplesize_confint_proportion(proportion, half_length, alpha=0.05, method='normal'): - '''find sample size to get desired confidence interval length + """ + Find sample size to get desired confidence interval length Parameters ---------- @@ -429,7 +532,7 @@ def samplesize_confint_proportion(proportion, half_length, alpha=0.05, this is mainly to store the formula. possible application: number of replications in bootstrap samples - ''' + """ q_ = proportion if method == 'normal': n = q_ * (1 - q_) / (half_length / stats.norm.isf(alpha / 2.))**2 @@ -440,7 +543,7 @@ def samplesize_confint_proportion(proportion, half_length, alpha=0.05, def proportion_effectsize(prop1, prop2, method='normal'): - ''' + """ Effect size for a test comparing two proportions for use in power function @@ -474,7 +577,7 @@ def proportion_effectsize(prop1, prop2, method='normal'): >>> sm.stats.proportion_effectsize([0.3, 0.4, 0.5], 0.4) array([-0.21015893, 0. , 0.20135792]) - ''' + """ if method != 'normal': raise ValueError('only "normal" is implemented') @@ -483,7 +586,8 @@ def proportion_effectsize(prop1, prop2, method='normal'): def std_prop(prop, nobs): - '''standard error for the estimate of a proportion + """ + Standard error for the estimate of a proportion This is just ``np.sqrt(p * (1. - p) / nobs)`` @@ -498,7 +602,7 @@ def std_prop(prop, nobs): ------- std : array_like standard error for a proportion of nobs independent observations - ''' + """ return np.sqrt(prop * (1. 
- prop) / nobs) @@ -509,13 +613,14 @@ def _std_diff_prop(p1, p2, ratio=1): def _power_ztost(mean_low, var_low, mean_upp, var_upp, mean_alt, var_alt, alpha=0.05, discrete=True, dist='norm', nobs=None, continuity=0, critval_continuity=0): - '''Generic statistical power function for normal based equivalence test + """ + Generic statistical power function for normal based equivalence test This includes options to adjust the normal approximation and can use the binomial to evaluate the probability of the rejection region see power_ztost_prob for a description of the options - ''' + """ # TODO: refactor structure, separate norm and binom better if not isinstance(continuity, tuple): continuity = (continuity, continuity) @@ -551,7 +656,8 @@ def _power_ztost(mean_low, var_low, mean_upp, var_upp, mean_alt, var_alt, def binom_tost(count, nobs, low, upp): - '''exact TOST test for one proportion using binomial distribution + """ + Exact TOST test for one proportion using binomial distribution Parameters ---------- @@ -569,7 +675,7 @@ def binom_tost(count, nobs, low, upp): pval_low, pval_upp : floats p-values of lower and upper one-sided tests - ''' + """ # binom_test_stat only returns pval tt1 = binom_test(count, nobs, alternative='larger', prop=low) tt2 = binom_test(count, nobs, alternative='smaller', prop=upp) @@ -577,7 +683,8 @@ def binom_tost(count, nobs, low, upp): def binom_tost_reject_interval(low, upp, nobs, alpha=0.05): - '''rejection region for binomial TOST + """ + Rejection region for binomial TOST The interval includes the end points, `reject` if and only if `r_low <= x <= r_upp`. 
@@ -596,7 +703,7 @@ def binom_tost_reject_interval(low, upp, nobs, alpha=0.05): x_low, x_upp : float lower and upper bound of rejection region - ''' + """ x_low = stats.binom.isf(alpha, nobs, low) + 1 x_upp = stats.binom.ppf(alpha, nobs, upp) - 1 return x_low, x_upp @@ -697,7 +804,8 @@ def power_binom_tost(low, upp, nobs, p_alt=None, alpha=0.05): def power_ztost_prop(low, upp, nobs, p_alt, alpha=0.05, dist='norm', variance_prop=None, discrete=True, continuity=0, critval_continuity=0): - '''Power of proportions equivalence test based on normal distribution + """ + Power of proportions equivalence test based on normal distribution Parameters ---------- @@ -766,7 +874,7 @@ def power_ztost_prop(low, upp, nobs, p_alt, alpha=0.05, dist='norm', SAS Manual: Chapter 68: The Power Procedure, Computational Resources PASS Chapter 110: Equivalence Tests for One Proportion. - ''' + """ mean_low = low var_low = std_prop(low, nobs)**2 mean_upp = upp @@ -782,7 +890,8 @@ def power_ztost_prop(low, upp, nobs, p_alt, alpha=0.05, dist='norm', def _table_proportion(count, nobs): - '''create a k by 2 contingency table for proportion + """ + Create a k by 2 contingency table for proportion helper function for proportions_chisquare @@ -802,7 +911,7 @@ def _table_proportion(count, nobs): ----- recent scipy has more elaborate contingency table functions - ''' + """ count = np.asarray(count) dt = np.promote_types(count.dtype, np.float64) count = np.asarray(count, dtype=dt) @@ -917,7 +1026,8 @@ def proportions_ztest(count, nobs, value=None, alternative='two-sided', def proportions_ztost(count, nobs, low, upp, prop_var='sample'): - '''Equivalence test based on normal distribution + """ + Equivalence test based on normal distribution Parameters ---------- @@ -949,7 +1059,7 @@ def proportions_ztost(count, nobs, low, upp, prop_var='sample'): ----- checked only for 1 sample case - ''' + """ if prop_var == 'limits': prop_var_low = low prop_var_upp = upp @@ -968,7 +1078,8 @@ def 
proportions_ztost(count, nobs, low, upp, prop_var='sample'): def proportions_chisquare(count, nobs, value=None): - '''test for proportions based on chisquare test + """ + Test for proportions based on chisquare test Parameters ---------- @@ -1007,7 +1118,7 @@ def proportions_chisquare(count, nobs, value=None): given and count and nobs are not scalar, then the null hypothesis is that all samples have the same proportion. - ''' + """ nobs = np.atleast_1d(nobs) table, expected, n_rows = _table_proportion(count, nobs) if value is not None: @@ -1023,7 +1134,8 @@ def proportions_chisquare(count, nobs, value=None): def proportions_chisquare_allpairs(count, nobs, multitest_method='hs'): - '''chisquare test of proportions for all pairs of k samples + """ + Chisquare test of proportions for all pairs of k samples Performs a chisquare test for proportions for all pairwise comparisons. The alternative is two-sided @@ -1053,7 +1165,7 @@ def proportions_chisquare_allpairs(count, nobs, multitest_method='hs'): Notes ----- Yates continuity correction is not available. - ''' + """ #all_pairs = lmap(list, lzip(*np.triu_indices(4, 1))) all_pairs = lzip(*np.triu_indices(len(count), 1)) pvals = [proportions_chisquare(count[list(pair)], nobs[list(pair)])[1] @@ -1063,7 +1175,8 @@ def proportions_chisquare_allpairs(count, nobs, multitest_method='hs'): def proportions_chisquare_pairscontrol(count, nobs, value=None, multitest_method='hs', alternative='two-sided'): - '''chisquare test of proportions for pairs of k samples compared to control + """ + Chisquare test of proportions for pairs of k samples compared to control Performs a chisquare test for proportions for pairwise comparisons with a control (Dunnet's test). The control is assumed to be the first element @@ -1102,7 +1215,7 @@ def proportions_chisquare_pairscontrol(count, nobs, value=None, ``value`` and ``alternative`` options are not yet implemented. 
- ''' + """ if (value is not None) or (alternative not in ['two-sided', '2s']): raise NotImplementedError #all_pairs = lmap(list, lzip(*np.triu_indices(4, 1))) @@ -1116,7 +1229,8 @@ def proportions_chisquare_pairscontrol(count, nobs, value=None, def confint_proportions_2indep(count1, nobs1, count2, nobs2, method=None, compare='diff', alpha=0.05, correction=True): - """Confidence intervals for comparing two independent proportions + """ + Confidence intervals for comparing two independent proportions This assumes that we have two independent binomial samples. @@ -1298,7 +1412,8 @@ def confint_proportions_2indep(count1, nobs1, count2, nobs2, method=None, def _shrink_prob(count1, nobs1, count2, nobs2, shrink_factor=2, return_corr=True): - """shrink observed counts towards independence + """ + Shrink observed counts towards independence Helper function for 'logit-smoothed' inference for the odds-ratio of two independent proportions. @@ -1342,7 +1457,8 @@ def _shrink_prob(count1, nobs1, count2, nobs2, shrink_factor=2, def score_test_proportions_2indep(count1, nobs1, count2, nobs2, value=None, compare='diff', alternative='two-sided', correction=True, return_results=True): - """score_test for two independent proportions + """ + Score test for two independent proportions This uses the constrained estimate of the proportions to compute the variance under the Null hypothesis. @@ -1498,7 +1614,8 @@ def test_proportions_2indep(count1, nobs1, count2, nobs2, value=None, method=None, compare='diff', alternative='two-sided', correction=True, return_results=True): - """Hypothesis test for comparing two independent proportions + """ + Hypothesis test for comparing two independent proportions This assumes that we have two independent binomial samples. 
@@ -1872,7 +1989,8 @@ def tost_proportions_2indep(count1, nobs1, count2, nobs2, low, upp, def _std_2prop_power(diff, p2, ratio=1, alpha=0.05, value=0): - """compute standard error under null and alternative for 2 proportions + """ + Compute standard error under null and alternative for 2 proportions helper function for power and sample size computation @@ -1899,7 +2017,8 @@ def _std_2prop_power(diff, p2, ratio=1, alpha=0.05, value=0): def power_proportions_2indep(diff, prop2, nobs1, ratio=1, alpha=0.05, value=0, alternative='two-sided', return_results=True): - """power for ztest that two independent proportions are equal + """ + Power for ztest that two independent proportions are equal This assumes that the variance is based on the pooled proportion under the null and the non-pooled variance under the alternative @@ -1980,7 +2099,8 @@ def power_proportions_2indep(diff, prop2, nobs1, ratio=1, alpha=0.05, def samplesize_proportions_2indep_onetail(diff, prop2, power, ratio=1, alpha=0.05, value=0, alternative='two-sided'): - """required sample size assuming normal distribution based on one tail + """ + Required sample size assuming normal distribution based on one tail This uses an explicit computation for the sample size that is required to achieve a given power corresponding to the appropriate tails of the @@ -2031,7 +2151,8 @@ def samplesize_proportions_2indep_onetail(diff, prop2, power, ratio=1, def _score_confint_inversion(count1, nobs1, count2, nobs2, compare='diff', alpha=0.05, correction=True): - """Compute score confidence interval by inverting score test + """ + Compute score confidence interval by inverting score test Parameters ---------- @@ -2104,7 +2225,8 @@ def func(v): def _confint_riskratio_koopman(count1, nobs1, count2, nobs2, alpha=0.05, correction=True): - """score confidence interval for ratio or proportions, Koopman/Nam + """ + Score confidence interval for ratio or proportions, Koopman/Nam signature not consistent with other functions @@ -2139,7 
+2261,8 @@ def _confint_riskratio_koopman(count1, nobs1, count2, nobs2, alpha=0.05, def _confint_riskratio_paired_nam(table, alpha=0.05): - """confidence interval for marginal risk ratio for matched pairs + """ + Confidence interval for marginal risk ratio for matched pairs need full table
diff --git a/statsmodels/stats/tests/test_proportion.py b/statsmodels/stats/tests/test_proportion.py --- a/statsmodels/stats/tests/test_proportion.py +++ b/statsmodels/stats/tests/test_proportion.py @@ -29,6 +29,7 @@ ) from statsmodels.tools.sm_exceptions import HypothesisTestWarning from statsmodels.tools.testing import Holder +from statsmodels.stats.tests.results.results_proportion import res_binom, res_binom_methods probci_methods = {'agresti_coull': 'agresti-coull', 'normal': 'asymptotic', @@ -37,31 +38,27 @@ 'jeffreys': 'bayes' } - -def test_confint_proportion(): - from .results.results_proportion import res_binom, res_binom_methods - - - for case in res_binom: - count, nobs = case - for method in probci_methods: - idx = res_binom_methods.index(probci_methods[method]) - res_low = res_binom[case].ci_low[idx] - res_upp = res_binom[case].ci_upp[idx] - if np.isnan(res_low) or np.isnan(res_upp): - continue - if (count == 0 or count == nobs) and method == 'jeffreys': - # maybe a bug or different corner case definition - continue - if method == 'jeffreys' and nobs == 30: - # something is strange in extreme case e.g 0/30 or 1/30 - continue - ci = proportion_confint(count, nobs, alpha=0.05, method=method) - # we impose that confint is in [0, 1] - res_low = max(res_low, 0) - res_upp = min(res_upp, 1) - assert_almost_equal(ci, [res_low, res_upp], decimal=6, - err_msg=repr(case) + method) [email protected]("case",res_binom) [email protected]("method",probci_methods) +def test_confint_proportion(method, case): + count, nobs = case + idx = res_binom_methods.index(probci_methods[method]) + res_low = res_binom[case].ci_low[idx] + res_upp = res_binom[case].ci_upp[idx] + if np.isnan(res_low) or np.isnan(res_upp): + pytest.skip("Skipping due to NaN value") + if (count == 0 or count == nobs) and method == 'jeffreys': + # maybe a bug or different corner case definition + pytest.skip("Skipping nobs 0 or count and jeffreys") + if method == 'jeffreys' and nobs == 30: + # something is 
strange in extreme case e.g 0/30 or 1/30 + pytest.skip("Skipping nobs is 30 and jeffreys due to extreme case problem") + ci = proportion_confint(count, nobs, alpha=0.05, method=method) + # we impose that confint is in [0, 1] + res_low = max(res_low, 0) + res_upp = min(res_upp, 1) + assert_almost_equal(ci, [res_low, res_upp], decimal=6, + err_msg=repr(case) + method) @pytest.mark.parametrize('method', probci_methods) @@ -927,8 +924,12 @@ def test_power_2indep(): @pytest.mark.parametrize("count", np.arange(10, 90, 5)) [email protected]("method", list(probci_methods.keys()) + ["binom_test"]) -def test_ci_symmetry(count, method): [email protected]( + "method", list(probci_methods.keys()) + ["binom_test"] +) [email protected]("array_like", [False, True]) +def test_ci_symmetry(count, method, array_like): + _count = [count] * 3 if array_like else count n = 100 a = proportion_confint(count, n, method=method) b = proportion_confint(n - count, n, method=method) @@ -937,9 +938,12 @@ def test_ci_symmetry(count, method): @pytest.mark.parametrize("nobs", [47, 50]) @pytest.mark.parametrize("count", np.arange(48)) -def test_ci_symmetry_binom_test(nobs, count): - a = proportion_confint(count, nobs, method="binom_test") - b = proportion_confint(nobs - count, nobs, method="binom_test") [email protected]("array_like", [False, True]) +def test_ci_symmetry_binom_test(nobs, count, array_like): + _count = [count] * 3 if array_like else count + nobs_m_count = [nobs - count] * 3 if array_like else nobs - count + a = proportion_confint(_count, nobs, method="binom_test") + b = proportion_confint(nobs_m_count, nobs, method="binom_test") assert_allclose(np.array(a), 1.0 - np.array(b[::-1])) @@ -955,3 +959,14 @@ def test_int_check(): c = proportion_confint(21, 47.0, method="binom_test") assert_allclose(a, b) assert_allclose(a, c) + + [email protected]("count", np.arange(10, 90, 5)) [email protected]( + "method", list(probci_methods.keys()) + ["binom_test"] +) +def test_ci_symmetry_array(count, 
method): + n = 100 + a = proportion_confint([count, count], n, method=method) + b = proportion_confint([n - count, n - count], n, method=method) + assert_allclose(np.array(a), 1.0 - np.array(b[::-1]))
proportion_confint inaccurate for method="binom_test" There is a symmetry for binomial proportion confidence interval therefore following property should holds (approximately): proportion_confint(count, nobs, method="binom_test")[0] + proportion_confint(nobs-count, nobs, method="binom_test")[1] ==1.0 but for some cases this property does not hold, for example, the following code: ```python from statsmodels.stats.proportion import proportion_confint eps = 1e-8 for nobs in [47, 50]: for count in range(nobs+1): p1 = proportion_confint(count, nobs, method="binom_test") p2 = proportion_confint(nobs-count, nobs, method="binom_test") if abs(p1[0]+p2[1]-1.0)>eps: print(f"{nobs=} {count=} {p1[0]+p2[1]}") ``` gives ouput: ``` nobs=47 count=3 0.9899913901078568 nobs=47 count=6 0.9857792597568958 nobs=47 count=12 0.9864319871901812 nobs=47 count=16 0.9851553703015263 nobs=47 count=23 0.9802691065584829 nobs=47 count=24 0.980269106558583 nobs=47 count=31 0.978547082683319 nobs=47 count=35 0.9718877308702878 nobs=47 count=41 0.978042999703059 nobs=47 count=44 0.9680701802192785 nobs=50 count=21 0.9796386543796607 nobs=50 count=29 0.9798450168011505 ``` Which indicate that there is some bug. 
#### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.8.5.final.0 statsmodels =========== Installed: 0.13.1 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.17.3 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\numpy) scipy: 1.3.3 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\scipy) pandas: 0.25.3 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\pandas) dateutil: 2.8.1 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\dateutil) patsy: 0.5.2 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\patsy) Optional Dependencies ===================== matplotlib: 3.2.0rc1 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\matplotlib) backend: TkAgg cvxopt: Not installed joblib: Not installed Developer Tools ================ IPython: 7.17.0 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\IPython) jinja2: 2.11.2 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\jinja2) sphinx: Not installed pygments: 2.6.1 (C:\Users\Admin\AppData\Local\Programs\Python\Python38-32\lib\site-packages\pygments) pytest: Not installed virtualenv: Not installed </details>
Thanks for reporting I can confirm this, but don't see any obvious problem. It's either because of some discreteness in scipy.stats.binom_test or in scipy.optimize.brentq. The differences in the confint are nontrivial, but inverting binom_test is not one of the recommended confints (both in the literature and in terms of the quickly done implementation). But I don't have time to dig into this right now (It's too far away from what I'm currently working on.) label `bug-wrong` because we should have consistent results for symmetry. However my guess is that they choose different solution because of non-monotonicities or discreteness, which are often a problem with hypothesis tests for proportions. Requirement for size is only `<= alpha` but not `== alpha` because of the discreteness of the sample space. I have tested the current version of proportion_confint for method="binom_test" and it looks like the new version works better. But symmetry still does not holds for some cases, for example, the following code: ```python from statsmodels.stats.proportion import proportion_confint eps = 1e-8 for nobs in range(45, 60): for count in range(nobs+1): p1 = proportion_confint(count, nobs, method="binom_test") p2 = proportion_confint(nobs-count, nobs, method="binom_test") if abs(p1[0]+p2[1]-1.0) > eps: print(f"{nobs=} {count=} {p1[0]+p2[1]}") ``` gives output: ``` nobs=45 count=45 0.991280515088538 nobs=51 count=51 1.0076714652881384 nobs=53 count=53 1.0073761192248545 nobs=54 count=54 0.9927631623092528 nobs=55 count=49 0.995421075648545 nobs=55 count=51 1.0079684524905446 nobs=58 count=58 0.9932712586455265 nobs=59 count=59 0.9933873005848147 ``` Also, previously noted that "Requirement for size is only <= alpha" i.e. method="binom_test" should be conservative. But the current version is not conservative. 
This can be seen by plotting the graph via the following code: ```python from statsmodels.stats.proportion import proportion_confint from scipy.special import comb import matplotlib.pyplot as plt import numpy as np def plot_coverage(methods_for_test, alpha, num, nobs_list): # start and end points are excluded to avoid any issue with 0**0 p_points = np.linspace(1.0/num, 1.0, num, endpoint=False) for nobs in nobs_list: # compute in advance all binomial coefficients for nobs bc = comb(nobs, range(nobs+1)) for method in methods_for_test: # compute in advance all confidence intervals all_ci = [[], []] for count in range(nobs+1): ci = proportion_confint( count, nobs, alpha=alpha, method=method) all_ci[0].append(ci[0]) all_ci[1].append(ci[1]) plot_data = [] for p in p_points: cp = 0.0 for count in range(nobs + 1): if all_ci[0][count] < p < all_ci[1][count]: cp += bc[count]*(p**count)*((1.0-p)**(nobs-count)) plot_data.append(cp) if cp < 1.0-alpha: print(f"{nobs=} {p=:.5f} {cp=}") plt.plot(p_points, plot_data, label=method + f", {nobs=}") plt.hlines(1.0-alpha, 0.0, 1.0, label=f"confidence line, {alpha=}") plt.grid(True) plt.legend() plt.xlabel('p') plt.ylabel(r"coverage probability") plt.show() methods_for_test = ["binom_test"] plot_coverage(methods_for_test, alpha=0.05, num=1999, nobs_list=[31, 50, 75]) ``` So we can see that in some cases coverage probability is less than 1-alpha, for example: ``` nobs=31 p=0.17150 cp=0.948454483652453 nobs=31 p=0.82850 cp=0.9484545872375366 nobs=50 p=0.07450 cp=0.9486301423455323 nobs=75 p=0.84900 cp=0.9493717635429614 ``` Also, proportion_confint does not work when count is array_array_like and method="binom_test", for example code: ```python from statsmodels.stats.proportion import proportion_confint proportion_confint(range(10), 50, method="binom_test") ``` doesn't work.
"2022-02-25T19:13:12Z"
0.13dev
[ "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[agresti_coull]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case1]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[jeffreys-case2]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case3]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case6]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case4]", "statsmodels/stats/tests/test_proportion.py::test_multinomial_proportions_errors", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case2]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[jeffreys-case1]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case2]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case4]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case1]", "statsmodels/stats/tests/test_proportion.py::test_samplesize_confidenceinterval_prop", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case7]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case6]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case0]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[jeffreys-case0]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[wilson]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case3]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case2]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case7]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case0]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case2]", 
"statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case1]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case4]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case4]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[beta]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case0]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case5]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case5]", "statsmodels/stats/tests/test_proportion.py::test_confint_multinomial_proportions", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case5]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case1]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[normal]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[jeffreys]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case3]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case7]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case3]", "statsmodels/stats/tests/test_proportion.py::test_proportion_effect_size", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case7]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case5]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case6]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case6]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case0]", "statsmodels/stats/tests/test_proportion.py::test_confint_multinomial_proportions_zeros", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count12]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count5]", "statsmodels/stats/tests/test_proportion.py::test_binom_tost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count7-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count16-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count4]", "statsmodels/stats/tests/test_proportion.py::test_proportion_ztests", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count15]", "statsmodels/stats/tests/test_proportion.py::test_power_ztost_prop", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count42-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count1]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count17-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count40-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count46-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count47-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count31-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count4-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count23-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count1-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count0]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count21-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count32-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count36-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count4]", "statsmodels/stats/tests/test_proportion.py::test_score_confint_koopman_nam", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count22-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count32-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count33-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count21-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count9]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count13-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count25-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count41-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count2]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count11-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count5-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count6-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count35-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count34-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count30-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count5]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count13]", "statsmodels/stats/tests/test_proportion.py::test_binom_test", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count35-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count5-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count47-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count46-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count29-47]", "statsmodels/stats/tests/test_proportion.py::test_binom_rejection_interval", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count3]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count14-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count38-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count7]", "statsmodels/stats/tests/test_proportion.py::test_test_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count39-47]", "statsmodels/stats/tests/test_proportion.py::test_confint_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count5]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count27-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count23-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count33-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count8]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count3-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count30-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count38-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count18-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count27-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count12-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count44-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count31-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count9-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count2]", "statsmodels/stats/tests/test_proportion.py::test_power_binom_tost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count42-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count8-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count12-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count24-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count9-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count19-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count6-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count0-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count43-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count0-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count10-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count37-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count45-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count14]", "statsmodels/stats/tests/test_proportion.py::test_ztost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count2-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count28-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count11]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count44-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count0]", "statsmodels/stats/tests/test_proportion.py::test_power_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count26-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count1-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count15-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count22-47]", "statsmodels/stats/tests/test_proportion.py::test_power_ztost_prop_norm", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count9]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count2-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count39-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count41-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count8-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count11-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count19-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count28-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count15-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count12]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count17-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count40-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count20-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count36-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count43-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count6]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count24-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count10-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count7]", "statsmodels/stats/tests/test_proportion.py::test_equivalence_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count45-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count15]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count29-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count7-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count20-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count3-50]", "statsmodels/stats/tests/test_proportion.py::test_score_test_2indep", "statsmodels/stats/tests/test_proportion.py::test_confint_2indep_propcis", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count15]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count26-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count14-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count16-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count25-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count4-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count37-50]", "statsmodels/stats/tests/test_proportion.py::test_int_check", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count13-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count18-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count6]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count34-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count10]", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_number_pairs_1493", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_default_values", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_scalar", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_proptest", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_pairwiseproptest" ]
[ "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count27-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count19-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count20-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count43-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count6-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count33-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count46-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count45-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count30-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count8-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count0-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count23-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count46-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count15-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count40-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count24-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count37-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count21-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count10-50]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count36-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count12-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count13-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count23-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count18-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count47-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count24-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count34-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count30-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count13-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count44-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count40-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count9-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count25-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count33-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count22-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count27-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count10-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count36-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count41-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count5-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count42-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count26-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count43-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count32-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count8-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count2-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count1-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count28-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count26-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count3-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count41-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count22-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count9-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count39-50]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count18-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count17-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count32-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count44-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count7-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count17-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count21-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count12-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count14-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count16-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count3-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count35-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count47-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count42-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count7-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count15-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count1-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count37-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count35-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count38-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count5-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count4-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count34-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count31-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count4-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count11-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count25-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count16-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count14-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count38-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count19-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count6-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count29-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count29-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count20-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count11-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count31-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count0-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count45-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count2-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count28-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count39-47]" ]
Python
[]
[]
statsmodels/statsmodels
8,339
statsmodels__statsmodels-8339
[ "8338" ]
35b803767bd7803ca5f9fc35d4546aa8cb7be844
diff --git a/statsmodels/stats/diagnostic.py b/statsmodels/stats/diagnostic.py --- a/statsmodels/stats/diagnostic.py +++ b/statsmodels/stats/diagnostic.py @@ -399,23 +399,6 @@ def acorr_ljungbox(x, lags=None, boxpierce=False, model_df=0, period=None, lb_stat lb_pvalue 10 214.106992 1.827374e-40 """ - def get_optimal_length(threshold_metric, threshold, maxlag, func): - optimal_lag = 0 - least_penalised = 0 - - for lags in range(1, maxlag + 1): - if (threshold_metric <= threshold): - penalty = lags * np.log(nobs) - else: - penalty = 2 * lags - - test_statistic = func(lags) - penalised = test_statistic - penalty - if (penalised > least_penalised): - optimal_lag = lags - least_penalised = penalised - - return optimal_lag # Avoid cyclic import from statsmodels.tsa.stattools import acf x = array_like(x, "x") @@ -429,25 +412,30 @@ def get_optimal_length(threshold_metric, threshold, maxlag, func): if auto_lag: maxlag = nobs - 1 - # Compute threshold metrics + # Compute sum of squared autocorrelations sacf = acf(x, nlags=maxlag, fft=False) - sacf2 = sacf[1:maxlag + 1] ** 2 / (nobs - np.arange(1, maxlag + 1)) + + if not boxpierce: + q_sacf = (nobs * (nobs + 2) * + np.cumsum(sacf[1:maxlag + 1] ** 2 + / (nobs - np.arange(1, maxlag + 1)))) + else: + q_sacf = nobs * np.cumsum(sacf[1:maxlag + 1] ** 2) + + # obtain thresholds q = 2.4 threshold = np.sqrt(q * np.log(nobs)) - threshold_metric = max(np.abs(sacf)) * np.sqrt(nobs) + threshold_metric = np.abs(sacf).max() * np.sqrt(nobs) - if not boxpierce: - lags = get_optimal_length( - threshold_metric, - threshold, maxlag, - lambda p: nobs * (nobs + 2) * np.cumsum(sacf2)[p - 1]) + # compute penalized sum of squared autocorrelations + if (threshold_metric <= threshold): + q_sacf = q_sacf - (np.arange(1, nobs) * np.log(nobs)) else: - lags = get_optimal_length( - threshold_metric, - threshold, - maxlag, - lambda p: nobs * np.cumsum(sacf[1:maxlag + 1] ** 2)[p - 1]) + q_sacf = q_sacf - (2 * np.arange(1, nobs)) + # note: np.argmax returns 
first (i.e., smallest) index of largest value + lags = np.argmax(q_sacf) + lags = max(1, lags) # optimal lag has to be at least 1 lags = int_like(lags, "lags") lags = np.arange(1, lags + 1) elif period is not None:
diff --git a/statsmodels/stats/tests/test_diagnostic.py b/statsmodels/stats/tests/test_diagnostic.py --- a/statsmodels/stats/tests/test_diagnostic.py +++ b/statsmodels/stats/tests/test_diagnostic.py @@ -1701,17 +1701,29 @@ def test_ljungbox_dof_adj(): assert np.all(res2.iloc[4:, 1] <= res1.iloc[4:, 1]) -def test_ljungbox_auto_lag_selection(): +def test_ljungbox_auto_lag_selection(reset_randomstate): data = sunspots.load_pandas().data["SUNACTIVITY"] res = AutoReg(data, 4, old_names=False).fit() resid = res.resid - res1 = smsdia.acorr_ljungbox(resid) - res2 = smsdia.acorr_ljungbox(resid, model_df=4) + res1 = smsdia.acorr_ljungbox(resid, auto_lag=True) + res2 = smsdia.acorr_ljungbox(resid, model_df=4, auto_lag=True) assert_allclose(res1.iloc[:, 0], res2.iloc[:, 0]) + # TODO: compare selected lags with Stata/ R to confirm + # that corect auto_lag is selected + assert res1.shape[0] >= 1 + assert res2.shape[0] >= 1 assert np.all(np.isnan(res2.iloc[:4, 1])) assert np.all(res2.iloc[4:, 1] <= res1.iloc[4:, 1]) +def test_ljungbox_auto_lag_whitenoise(reset_randomstate): + data = np.random.randn(1000) # white noise process + res = smsdia.acorr_ljungbox(data, auto_lag=True) + # TODO: compare selected lags with Stata/ R to confirm + # that correct auto_lag is selected + assert res.shape[0] >= 1 # auto lag selected must be at least 1 + + def test_ljungbox_errors_warnings(): data = sunspots.load_pandas().data["SUNACTIVITY"] with pytest.raises(ValueError, match="model_df must"):
auto_lag selection in acorr_ljungbox fails when there is no autocorrelation #### Describe the bug The `acorr_ljungbox` function has an `auto_lag` flag which determines the optimal lag for performing the Ljung-Box autocorrelations test. When there is no autocorrelation in the data, however, the function fails and returns a "ValueError: zero-size array to reduction operation maximum which has no identity". When there is low autocorrelation, it may be returning an incorrect optimal lag (for theoretical reasons - described under **Details**). #### Code Sample, a copy-pastable example if possible ```python import numpy as np from statsmodels.stats.diagnostic import acorr_ljungbox data = np.random.rand(1000) # white noise process with no autocorrelation res = acorr_ljunbox(data, auto_lag=True) ValueError: zero-size array to reduction operation maximum which has no identity ``` <details> The `auto_lag` feature attempts to implement the lag selection criterion outlined in Escanciano & Lobato (J. of Econometrics, 2009). However, there are two errors in the implementation, which is performed by the `get_optimal_length` inner function of the` acorr_ljungbox` function. There is also one error in the associated pytest. The first error is that the optimal_lag is initialized at `0`. When the rest of the routine does not update the optimal_lag (see second error below for why this may happen), the optimal lag length is set at `0`. This causes an empty array to be initialized, and when the function tries to find the `max()` of this array, the `ValueError` related to a zero-size array is returned. Even theoretically, [1] defines the putative optimal lag $\tilde{p}$ as `>=1`. The second error is that `get_optimal_length` initializes the value of the least penalized test statistic ($L_p$ in eq. (4), pg. 143 in [1]) as `0`. This is incorrect --- there is no theoretical reason why the penalized value of the test statistic should be positive. 
This is especially true for series that exhibit low degrees of autocorrelation, when the unpenalized Box-Pierce (or Ljung-Box) test statistic can be a very low value. Thus, even if the least penalized test statistic occurs at a large lag, if it has a negative penalized score, `get_optimal_lag` would still return `0` as the optimal lag. Finally, the test associated with this function `test_ljungbox_auto_lag_selection()` (in `test_diagnostic.py`) calls `acorr_ljungbox()`, but omits to set `auto_lag = True`. So the test is invalid. I've initiated a pull request fixing the above issues and the test. This is my first bug report and pull request, so any guidance is greatly appreciated! _[1] J. Carlos Escanciano, Ignacio N. Lobato β€œAn automatic Portmanteau test for serial correlation”., J. of Econometrics Volume 151, 2009._ </details> #### Expected Output Return the result for the Ljung-Box test with the optimal lag as 1 if autocorrelation is absent, and the correct lag based on the Escanciano & Lobato procedure if autocorrelation is not absent. 
#### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> Output of ``import statsmodels.api as sm; sm.show_versions()`` INSTALLED VERSIONS ------------------ Python: 3.8.1.final.0 statsmodels =========== Installed: 0.13.2 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\statsmodels) Required Dependencies ===================== cython: 0.29.15 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\Cython) numpy: 1.18.1 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\numpy) scipy: 1.4.1 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\scipy) pandas: 1.0.1 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\pandas) dateutil: 2.8.1 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\dateutil) patsy: 0.5.2 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\patsy) Optional Dependencies ===================== matplotlib: 3.1.3 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\matplotlib) backend: TkAgg cvxopt: Not installed joblib: 1.1.0 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\joblib) Developer Tools ================ IPython: 7.16.1 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\IPython) jinja2: 2.11.2 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\jinja2) sphinx: 3.1.2 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\sphinx) pygments: 2.6.1 (C:\Users\Vasudeva\AppData\Local\Programs\Python\Python38\lib\site-packages\pygments) pytest: Not installed virtualenv: Not installed </details>
"2022-07-07T01:40:51Z"
0.13
[ "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box_against_r", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_breusch_godfrey", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_hac", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_breusch_pagan", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_influence", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_recursive_residuals", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box_small_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_j", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_cox", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_breusch_godfrey_exogs", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_error[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box_big_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_breaks_hansen", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_basic", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_nested[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_breusch_pagan_nonrobust", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_rainbow", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_arch", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_nested[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_arch2", 
"statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_breusch_godfrey_multidim", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_cusum_ols", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_breusch_pagan_1d_err", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_white", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_goldfeldquandt", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_error[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_lr", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_normality", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_white_error", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_harvey_collier", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_breusch_pagan_nonrobust", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_recursive_residuals", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_rainbow", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_arch2", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_breusch_godfrey_exogs", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_hac", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box_small_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_j", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_harvey_collier", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_lr", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_error[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_arch", 
"statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_goldfeldquandt", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_cusum_ols", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_white_error", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_basic", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_nested[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_white", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_normality", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_breusch_godfrey_multidim", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_error[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_breaks_hansen", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_nested[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_breusch_pagan", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_influence", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_cox", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_breusch_godfrey", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_breusch_pagan_1d_err", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box_big_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box_against_r", "statsmodels/stats/tests/test_diagnostic.py::test_gq", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[None-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-princomp-2]", 
"statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by2-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_influence_wrapped", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-2-True]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_dof_adj", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_spec_white_error", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_auto_lag_selection", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by4-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_centered[300]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_small_skip", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-0-True]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_exception", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-2-False]", "statsmodels/stats/tests/test_diagnostic.py::test_encompasing_error", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-2-False]", 
"statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by1-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[None-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[None-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_outlier_influence_funcs", "statsmodels/stats/tests/test_diagnostic.py::test_spec_white", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-0-True]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-princomp-2]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by2-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[x0-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by1-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by2-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[x0-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_errors_warnings", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-2-True]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-0-False]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by1-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_period", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_centered[None]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke_no_autolag", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-princomp-3]", 
"statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-princomp-2]", "statsmodels/stats/tests/test_diagnostic.py::test_encompasing_direct[nonrobust]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by4-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_encompasing_direct[HC0]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by4-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-0-False]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[x0-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_centered[0.33]", "statsmodels/stats/tests/test_diagnostic.py::test_outlier_test", "statsmodels/stats/tests/test_diagnostic.py::test_influence_dtype", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_linear_lm_direct", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-princomp-2]" ]
[ "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_auto_lag_whitenoise" ]
Python
[]
[]
statsmodels/statsmodels
8,371
statsmodels__statsmodels-8371
[ "8360" ]
edd843f0afb37b30cb503709e2d4ff4bc57f9c04
diff --git a/statsmodels/genmod/families/family.py b/statsmodels/genmod/families/family.py --- a/statsmodels/genmod/families/family.py +++ b/statsmodels/genmod/families/family.py @@ -8,6 +8,7 @@ import inspect +import warnings import numpy as np from scipy import special, stats @@ -1310,6 +1311,9 @@ class NegativeBinomial(Family): def __init__(self, link=None, alpha=1.): self.alpha = 1. * alpha # make it at least float + if alpha is self.__init__.__defaults__[1]: + warnings.warn("Negative binomial dispersion parameter alpha not " + f"set. Using default value alpha={alpha}.") if link is None: link = L.Log() super(NegativeBinomial, self).__init__(
diff --git a/statsmodels/discrete/tests/test_discrete.py b/statsmodels/discrete/tests/test_discrete.py --- a/statsmodels/discrete/tests/test_discrete.py +++ b/statsmodels/discrete/tests/test_discrete.py @@ -1636,6 +1636,22 @@ def test_issue_341(): np.testing.assert_equal(res1.predict(x[None]).shape, (1,7)) +def test_negative_binomial_default_alpha_param(): + with pytest.warns(UserWarning, match='Negative binomial' + ' dispersion parameter alpha not set'): + sm.families.NegativeBinomial() + with pytest.warns(UserWarning, match='Negative binomial' + ' dispersion parameter alpha not set'): + sm.families.NegativeBinomial(link=sm.families.links.nbinom(alpha=1.0)) + with warnings.catch_warnings(): + warnings.simplefilter("error") + sm.families.NegativeBinomial(alpha=1.0) + with warnings.catch_warnings(): + warnings.simplefilter("error") + sm.families.NegativeBinomial(link=sm.families.links.nbinom(alpha=1.0), + alpha=1.0) + + def test_iscount(): X = np.random.random((50, 10)) X[:,2] = np.random.randint(1, 10, size=50)
ENH: GLM add warning if negbin parameter is default I have seen several examples, e.g. on stackoverflow, where users do not set the GLM-negbin parameter. I guess users often assume that the extra parameter is estimated. A warning if the default parameter is used would be useful to reduce the risk that user misunderstand the model.
"2022-08-14T21:54:17Z"
0.13
[ "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_cov_params", 
"statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_init_kwds", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_score", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_fit_regularized", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_t", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_hessian", "statsmodels/discrete/tests/test_discrete.py::test_issue_341", "statsmodels/discrete/tests/test_discrete.py::test_formula_missing_exposure", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor", "statsmodels/discrete/tests/test_discrete.py::test_binary_pred_table_zeros", "statsmodels/discrete/tests/test_discrete.py::test_predict_with_exposure", "statsmodels/discrete/tests/test_discrete.py::test_iscount", "statsmodels/discrete/tests/test_discrete.py::test_poisson_newton", "statsmodels/discrete/tests/test_discrete.py::test_isdummy", "statsmodels/discrete/tests/test_discrete.py::test_non_binary", "statsmodels/discrete/tests/test_discrete.py::test_perfect_prediction", "statsmodels/discrete/tests/test_discrete.py::test_poisson_predict", "statsmodels/discrete/tests/test_discrete.py::test_issue_339", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor_categorical", "statsmodels/discrete/tests/test_discrete.py::test_null_options", "statsmodels/discrete/tests/test_discrete.py::test_cov_confint_pandas", "statsmodels/discrete/tests/test_discrete.py::test_unchanging_degrees_of_freedom", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_float_name", "statsmodels/discrete/tests/test_discrete.py::test_t_test", "statsmodels/discrete/tests/test_discrete.py::test_optim_kwds_prelim", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_response", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bic", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_init_kwargs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_generalized", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p1", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p2", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_df", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_llf", 
"statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNull::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_init_kwds", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_prob", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_dummy_overall", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_resid", 
"statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_mean", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_k", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_overall", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroMNLogit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_mean", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llf", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_k", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestSweepAlphaL1::test_sweep_alpha", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_tests", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_cov_params", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_converged", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_tests", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_summary_latex", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoissonNull::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr_pvalue", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxoverall", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_aic", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_dof", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxzero", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog2", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_wald", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_t", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_df", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_jac", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_df", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_newton", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_mean_var", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_predict_prob", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_basic", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_bad_r_matrix", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_jac", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llf", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_diagnostic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxoverall", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_generalized", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bic", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fittedvalues" ]
[ "statsmodels/discrete/tests/test_discrete.py::test_negative_binomial_default_alpha_param" ]
Python
[]
[]
statsmodels/statsmodels
8,537
statsmodels__statsmodels-8537
[ "8471" ]
e544656a2d0ab4ff33f30deca118d8366be16cb8
diff --git a/statsmodels/tsa/stattools.py b/statsmodels/tsa/stattools.py --- a/statsmodels/tsa/stattools.py +++ b/statsmodels/tsa/stattools.py @@ -268,6 +268,9 @@ def adfuller( store = bool_like(store, "store") regresults = bool_like(regresults, "regresults") + if x.max() == x.min(): + raise ValueError("Invalid input, x is constant") + if regresults: store = True
diff --git a/statsmodels/tsa/tests/test_stattools.py b/statsmodels/tsa/tests/test_stattools.py --- a/statsmodels/tsa/tests/test_stattools.py +++ b/statsmodels/tsa/tests/test_stattools.py @@ -188,6 +188,12 @@ def test_store_str(self): ) [email protected]("x", [np.full(8, 5.0)]) +def test_adfuller_resid_variance_zero(x): + with pytest.raises(ValueError): + adfuller(x) + + class CheckCorrGram: """ Set up for ACF, PACF tests.
DIckey Fuller test breaks on constant values #### Describe the bug Dickey-Fuller test gives error when testing with constant values. #### Code Sample, a copy-pastable example if possible ```python adfuller(np.full(8, 5.0)) ``` `RuntimeWarning: divide by zero encountered in log llf = -nobs2*np.log(2*np.pi) - nobs2*np.log(ssr / nobs) - nobs2` #### Expected Output I have expected that this series is stationary #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.10.6.final.0 OS: Linux 5.15.0-50-generic #56-Ubuntu SMP Tue Sep 20 13:23:26 UTC 2022 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.13.2 (/home/michael/.local/lib/python3.10/site-packages/statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.23.3 (/home/michael/.local/lib/python3.10/site-packages/numpy) scipy: 1.9.2 (/home/michael/.local/lib/python3.10/site-packages/scipy) pandas: 1.5.0 (/home/michael/.local/lib/python3.10/site-packages/pandas) dateutil: 2.8.2 (/home/michael/.local/lib/python3.10/site-packages/dateutil) patsy: 0.5.3 (/home/michael/.local/lib/python3.10/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.6.1 (/home/michael/.local/lib/python3.10/site-packages/matplotlib) backend: module://matplotlib_inline.backend_inline cvxopt: Not installed joblib: Not installed Developer Tools ================ IPython: 8.5.0 (/home/michael/.local/lib/python3.10/site-packages/IPython) jinja2: 3.1.2 (/home/michael/.local/lib/python3.10/site-packages/jinja2) sphinx: Not installed pygments: 2.13.0 (/home/michael/.local/lib/python3.10/site-packages/pygments) pytest: Not installed virtualenv: 20.16.5 (/home/michael/.local/lib/python3.10/site-packages/virtualenv) </details>
The test isn't well defined if the residual variance is 0. Ideally this would be caught and a meaningful exception raised.
"2022-11-24T17:33:37Z"
0.13
[ "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_pval", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_fail_nonvector_input", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_store", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_raise", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_qstat_none", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_none", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_conservative", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_drop", "statsmodels/tsa/tests/test_stattools.py::TestCoint_t::test_tstat", "statsmodels/tsa/tests/test_stattools.py::test_coint_perfect_collinearity", "statsmodels/tsa/tests/test_stattools.py::test_coint", "statsmodels/tsa/tests/test_stattools.py::test_coint_identical_series", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acf_fft_dataframe", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-conservative]", 
"statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[False-True]", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_short_series", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[False-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_ar", "statsmodels/tsa/tests/test_stattools.py::test_pacf_burg_error", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-True-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-True-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-raise]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf2d", "statsmodels/tsa/tests/test_stattools.py::test_levinson_durbin_acov", "statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic_failure", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-False-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-True-False]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[True-True]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_maxlag_too_large", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-False-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_pandasacovf", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_errors", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_pacf_burg", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_pandas", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_filter_kalman_filter", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_error", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_levinson_durbin", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[True-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_errors", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-raise]", 
"statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-True-True]", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_rtol", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-True]", "statsmodels/tsa/tests/test_stattools.py::test_pacf_nlags_error", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exception_maxlag", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset0]", "statsmodels/tsa/tests/test_stattools.py::test_coint_auto_tstat", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset3]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset2]", "statsmodels/tsa/tests/test_stattools.py::test_acf_conservate_nanops", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset1]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_verbose", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[decreasing-0.024390243902439025-0.9761904761904762]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_1d_input", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_subset_length[2-41-0.047619047619047616]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[increasing-41-0.023809523809523808]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[two-sided-41-0.047619047619047616]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_subset_length[0.5-10-0.09048484886749095]", 
"statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_use_chi2", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_2d_input_with_missing_values", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_nonvector_input", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_unclear_hypothesis", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_none", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_autolags_does_not_assign_lags_equal_to_nobs", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_unknown_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_legacy_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_store", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_pval", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_qstat", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_confint", "statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_qstat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_stkprc_case", 
"statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_array_shape", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[aic]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[AIC]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_trim_value", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_gnpdef_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnp_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_regression_type", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[Aic]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnpq_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_autolag_type", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rand10000_case", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_store_str", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols_inefficient", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_burg", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ld", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_yw", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_pvalue", 
"statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_zero_lag", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality_single", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_finite_check" ]
[ "statsmodels/tsa/tests/test_stattools.py::test_adfuller_resid_variance_zero[x0]" ]
Python
[]
[]
statsmodels/statsmodels
8,567
statsmodels__statsmodels-8567
[ "8363" ]
8fb709818984e9ddade22b72f9ed5cea561c38f2
diff --git a/statsmodels/regression/linear_model.py b/statsmodels/regression/linear_model.py --- a/statsmodels/regression/linear_model.py +++ b/statsmodels/regression/linear_model.py @@ -49,7 +49,10 @@ # need import in module instead of lazily to copy `__doc__` from statsmodels.regression._prediction import PredictionResults from statsmodels.tools.decorators import cache_readonly, cache_writable -from statsmodels.tools.sm_exceptions import InvalidTestWarning, ValueWarning +from statsmodels.tools.sm_exceptions import ( + InvalidTestWarning, + ValueWarning, + ) from statsmodels.tools.tools import pinv_extended from statsmodels.tools.typing import Float64Array from statsmodels.tools.validation import bool_like, float_like, string_like @@ -1475,7 +1478,15 @@ def yule_walker(x, order=1, method="adjusted", df=None, inv=False, r[k] = (x[0:-k] * x[k:]).sum() / (n - k * adj_needed) R = toeplitz(r[:-1]) - rho = np.linalg.solve(R, r[1:]) + try: + rho = np.linalg.solve(R, r[1:]) + except np.linalg.LinAlgError as err: + if 'Singular matrix' in str(err): + warnings.warn("Matrix is singular. 
Using pinv.", ValueWarning) + rho = np.linalg.pinv(R) @ r[1:] + else: + raise + sigmasq = r[0] - (r[1:]*rho).sum() sigma = np.sqrt(sigmasq) if not np.isnan(sigmasq) and sigmasq > 0 else np.nan if inv: diff --git a/statsmodels/stats/gof.py b/statsmodels/stats/gof.py --- a/statsmodels/stats/gof.py +++ b/statsmodels/stats/gof.py @@ -144,9 +144,9 @@ def powerdiscrepancy(observed, expected, lambd=0.0, axis=0, ddof=0): if axis == 0: e = e.T - if np.all(np.sum(e, axis=axis) == n): + if np.allclose(np.sum(e, axis=axis), n, rtol=1e-8, atol=0): p = e/(1.0*nt) - elif np.all(np.sum(e, axis=axis) == 1): + elif np.allclose(np.sum(e, axis=axis), 1, rtol=1e-8, atol=0): p = e e = nt * e else: diff --git a/statsmodels/tsa/stattools.py b/statsmodels/tsa/stattools.py --- a/statsmodels/tsa/stattools.py +++ b/statsmodels/tsa/stattools.py @@ -24,6 +24,7 @@ InfeasibleTestError, InterpolationWarning, MissingDataError, + ValueWarning, ) from statsmodels.tools.tools import Bunch, add_constant from statsmodels.tools.validation import ( @@ -750,8 +751,10 @@ def pacf_yw(x, nlags=None, method="adjusted"): method = string_like(method, "method", options=("adjusted", "mle")) pacf = [1.0] - for k in range(1, nlags + 1): - pacf.append(yule_walker(x, k, method=method)[0][-1]) + with warnings.catch_warnings(): + warnings.simplefilter("once", ValueWarning) + for k in range(1, nlags + 1): + pacf.append(yule_walker(x, k, method=method)[0][-1]) return np.array(pacf)
diff --git a/statsmodels/tsa/tests/test_stattools.py b/statsmodels/tsa/tests/test_stattools.py --- a/statsmodels/tsa/tests/test_stattools.py +++ b/statsmodels/tsa/tests/test_stattools.py @@ -25,6 +25,7 @@ InfeasibleTestError, InterpolationWarning, MissingDataError, + ValueWarning, ) # Remove imports when range unit root test gets an R implementation from statsmodels.tools.validation import array_like, bool_like @@ -346,6 +347,10 @@ def test_yw(self): pacfyw = pacf_yw(self.x, nlags=40, method="mle") assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8) + def test_yw_singular(self): + with pytest.warns(ValueWarning): + pacf(np.ones(30), nlags=6) + def test_ld(self): pacfyw = pacf_yw(self.x, nlags=40, method="mle") pacfld = pacf(self.x, nlags=40, method="ldb")
PACF Fails When Matrix for Yule-Walker Equations is Singular When we try to use the `pacf` function and the matrix for the Yule-Walker equations is singular, we get a [`LinAlgError`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.LinAlgError.html) and the code fails. This also leads to downstream failures. We could resolve this by instead using the psuedo-inverse function ([`numpy.linalg.pinv`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.pinv.html)) when solving the equations [here](https://github.com/statsmodels/statsmodels/blob/main/statsmodels/regression/linear_model.py#L1471). Here is an example of code that will currently fail with a `LinAlgError`. ```python import numpy as np from statsmodels.tsa.stattools import acf, kpss, pacf pacf(np.ones(30), nlags=6) ```
"2022-12-08T18:48:34Z"
0.13
[ "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_qstat", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_confint", "statsmodels/tsa/tests/test_stattools.py::TestACF::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality_single", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_grangercausality", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_finite_check", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestGrangerCausality::test_granger_fails_on_zero_lag", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_acf", "statsmodels/tsa/tests/test_stattools.py::TestACF_FFT::test_qstat", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_resid_variance_zero[x0]", "statsmodels/tsa/tests/test_stattools.py::test_coint_identical_series", "statsmodels/tsa/tests/test_stattools.py::test_coint", "statsmodels/tsa/tests/test_stattools.py::test_coint_perfect_collinearity", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-False-False]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acf_fft_dataframe", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_pandasacovf", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-False]", "statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[True-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[True-True]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-True-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_maxlag_too_large", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_filter_kalman_filter", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-none]", 
"statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_pandas", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-True-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-False]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[True-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[False-False]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-True-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf2d", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_fft_vs_convolution[False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-none]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_pacf_burg", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_pacf_burg_error", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-True-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-raise]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-False-True-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[True-True-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_ar", "statsmodels/tsa/tests/test_stattools.py::test_innovations_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags_missing[False-False-True-conservative]", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_errors", "statsmodels/tsa/tests/test_stattools.py::test_acovf_error", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-none]", 
"statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-False-drop]", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[True-True-False]", "statsmodels/tsa/tests/test_stattools.py::test_adfuller_short_series", "statsmodels/tsa/tests/test_stattools.py::test_compare_acovf_vs_ccovf[False-False-False]", "statsmodels/tsa/tests/test_stattools.py::test_ccovf_fft_vs_convolution[False-True]", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-True-False-raise]", "statsmodels/tsa/tests/test_stattools.py::test_arma_order_select_ic_failure", "statsmodels/tsa/tests/test_stattools.py::test_innovations_filter_errors", "statsmodels/tsa/tests/test_stattools.py::test_pacf2acf_levinson_durbin", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_rtol", "statsmodels/tsa/tests/test_stattools.py::test_acovf_nlags[False-False-True-none]", "statsmodels/tsa/tests/test_stattools.py::test_levinson_durbin_acov", "statsmodels/tsa/tests/test_stattools.py::test_innovations_algo_brockwell_davis", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset3]", "statsmodels/tsa/tests/test_stattools.py::test_pacf_nlags_error", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset2]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_verbose", "statsmodels/tsa/tests/test_stattools.py::test_acf_conservate_nanops", "statsmodels/tsa/tests/test_stattools.py::test_coint_auto_tstat", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exception_maxlag", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset1]", "statsmodels/tsa/tests/test_stattools.py::test_granger_causality_exceptions[dataset0]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_use_chi2", 
"statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_1d_input", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_2d_input_with_missing_values", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[increasing-41-0.023809523809523808]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_subset_length[0.5-10-0.09048484886749095]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[two-sided-41-0.047619047619047616]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_subset_length[2-41-0.047619047619047616]", "statsmodels/tsa/tests/test_stattools.py::TestBreakvarHeteroskedasticityTest::test_alternative[decreasing-0.024390243902439025-0.9761904761904762]", "statsmodels/tsa/tests/test_stattools.py::TestCoint_t::test_tstat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_pval", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_fail_nonvector_input", "statsmodels/tsa/tests/test_stattools.py::TestRUR::test_store", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_conservative", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_qstat_none", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_raise", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_none", "statsmodels/tsa/tests/test_stattools.py::TestACFMissing::test_acf_drop", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_stkprc_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[AIC]", 
"statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnpq_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_gnpdef_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_trim_value", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[aic]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rand10000_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_autolag_type", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_rgnp_case", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_autolag_case_sensitivity[Aic]", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_array_shape", "statsmodels/tsa/tests/test_stattools.py::TestZivotAndrews::test_fail_regression_type", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_unclear_hypothesis", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_fails_on_nobs_check", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_kpss_autolags_does_not_assign_lags_equal_to_nobs", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_store", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_unknown_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_fail_nonvector_input", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_pval", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_legacy_lags", "statsmodels/tsa/tests/test_stattools.py::TestKPSS::test_none", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ld", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_yw", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_burg", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols_inefficient", "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_ols", 
"statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_critvalues", "statsmodels/tsa/tests/test_stattools.py::TestADFConstantTrend2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_pvalue", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_store_str", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_teststat", "statsmodels/tsa/tests/test_stattools.py::TestADFNoConstant2::test_critvalues" ]
[ "statsmodels/tsa/tests/test_stattools.py::TestPACF::test_yw_singular" ]
Python
[]
[]
statsmodels/statsmodels
8,569
statsmodels__statsmodels-8569
[ "1880" ]
edfcbbd542c3206d27d903f9b8f0c42c4231dc97
diff --git a/statsmodels/discrete/discrete_model.py b/statsmodels/discrete/discrete_model.py --- a/statsmodels/discrete/discrete_model.py +++ b/statsmodels/discrete/discrete_model.py @@ -2594,7 +2594,7 @@ class Probit(BinaryModel): @cache_readonly def link(self): from statsmodels.genmod.families import links - link = links.probit() + link = links.Probit() return link def cdf(self, X): diff --git a/statsmodels/genmod/families/family.py b/statsmodels/genmod/families/family.py --- a/statsmodels/genmod/families/family.py +++ b/statsmodels/genmod/families/family.py @@ -14,6 +14,9 @@ from scipy import special, stats from statsmodels.compat.scipy import SP_LT_17 +from statsmodels.tools.sm_exceptions import ( + ValueWarning, + ) from . import links as L, varfuncs as V FLOAT_EPS = np.finfo(float).eps @@ -59,13 +62,14 @@ def _setlink(self, link): # <statsmodels.family.links.Power object at 0x9a4236c>] # for Poisson... self._link = link - if not isinstance(link, L.Link): - raise TypeError("The input should be a valid Link object.") - if hasattr(self, "links"): - validlink = max([isinstance(link, _) for _ in self.links]) - if not validlink: - errmsg = "Invalid link for family, should be in %s. (got %s)" - raise ValueError(errmsg % (repr(self.links), link)) + if self._check_link: + if not isinstance(link, L.Link): + raise TypeError("The input should be a valid Link object.") + if hasattr(self, "links"): + validlink = max([isinstance(link, _) for _ in self.links]) + if not validlink: + msg = "Invalid link for family, should be in %s. (got %s)" + raise ValueError(msg % (repr(self.links), link)) def _getlink(self): """ @@ -76,15 +80,16 @@ def _getlink(self): # link property for each family is a pointer to link instance link = property(_getlink, _setlink, doc="Link function for family") - def __init__(self, link, variance): + def __init__(self, link, variance, check_link=True): + self._check_link = check_link if inspect.isclass(link): warnmssg = ( "Calling Family(..) 
with a link class is not allowed. Use an " "instance of a link class instead." ) raise TypeError(warnmssg) - else: - self.link = link + + self.link = link self.variance = variance def starting_mu(self, y): @@ -397,10 +402,14 @@ class Poisson(Family): valid = [0, np.inf] safe_links = [L.Log, ] - def __init__(self, link=None): + def __init__(self, link=None, check_link=True): if link is None: link = L.Log() - super(Poisson, self).__init__(link=link, variance=Poisson.variance) + super(Poisson, self).__init__( + link=link, + variance=Poisson.variance, + check_link=check_link + ) def _resid_dev(self, endog, mu): r""" @@ -545,10 +554,14 @@ class Gaussian(Family): variance = V.constant safe_links = links - def __init__(self, link=None): + def __init__(self, link=None, check_link=True): if link is None: link = L.Identity() - super(Gaussian, self).__init__(link=link, variance=Gaussian.variance) + super(Gaussian, self).__init__( + link=link, + variance=Gaussian.variance, + check_link=check_link + ) def _resid_dev(self, endog, mu): r""" @@ -710,10 +723,14 @@ class Gamma(Family): variance = V.mu_squared safe_links = [L.Log, ] - def __init__(self, link=None): + def __init__(self, link=None, check_link=True): if link is None: - link = L.inverse_power() - super(Gamma, self).__init__(link=link, variance=Gamma.variance) + link = L.InversePower() + super(Gamma, self).__init__( + link=link, + variance=Gamma.variance, + check_link=check_link + ) def _resid_dev(self, endog, mu): r""" @@ -884,7 +901,7 @@ class Binomial(Family): # Other safe links, e.g. 
cloglog and probit are subclasses safe_links = [L.Logit, L.CDFLink] - def __init__(self, link=None): # , n=1.): + def __init__(self, link=None, check_link=True): # , n=1.): if link is None: link = L.Logit() # TODO: it *should* work for a constant n>1 actually, if freq_weights @@ -892,8 +909,11 @@ def __init__(self, link=None): # , n=1.): self.n = 1 # overwritten by initialize if needed but always used to initialize # variance since endog is assumed/forced to be (0,1) - super(Binomial, self).__init__(link=link, - variance=V.Binomial(n=self.n)) + super(Binomial, self).__init__( + link=link, + variance=V.Binomial(n=self.n), + check_link=check_link + ) def starting_mu(self, y): r""" @@ -1114,7 +1134,7 @@ class InverseGaussian(Family): link : a link instance, optional The default link for the inverse Gaussian family is the inverse squared link. - Available links are inverse_squared, inverse, log, and identity. + Available links are InverseSquared, Inverse, Log, and Identity. See statsmodels.genmod.families.links for more information. Attributes @@ -1140,11 +1160,14 @@ class InverseGaussian(Family): variance = V.mu_cubed safe_links = [L.InverseSquared, L.Log, ] - def __init__(self, link=None): + def __init__(self, link=None, check_link=True): if link is None: - link = L.inverse_squared() + link = L.InverseSquared() super(InverseGaussian, self).__init__( - link=link, variance=InverseGaussian.variance) + link=link, + variance=InverseGaussian.variance, + check_link=check_link + ) def _resid_dev(self, endog, mu): r""" @@ -1310,15 +1333,19 @@ class NegativeBinomial(Family): variance = V.nbinom safe_links = [L.Log, ] - def __init__(self, link=None, alpha=1.): + def __init__(self, link=None, alpha=1., check_link=True): self.alpha = 1. * alpha # make it at least float if alpha is self.__init__.__defaults__[1]: # `is` is intentional warnings.warn("Negative binomial dispersion parameter alpha not " - f"set. Using default value alpha={alpha}.") + f"set. 
Using default value alpha={alpha}.", + ValueWarning) if link is None: link = L.Log() super(NegativeBinomial, self).__init__( - link=link, variance=V.NegativeBinomial(alpha=self.alpha)) + link=link, + variance=V.NegativeBinomial(alpha=self.alpha), + check_link=check_link + ) def _resid_dev(self, endog, mu): r""" @@ -1518,7 +1545,7 @@ class Tweedie(Family): variance = V.Power(power=1.5) safe_links = [L.Log, L.Power] - def __init__(self, link=None, var_power=1., eql=False): + def __init__(self, link=None, var_power=1., eql=False, check_link=True): self.var_power = var_power self.eql = eql if eql and (var_power < 1 or var_power > 2): @@ -1527,7 +1554,10 @@ def __init__(self, link=None, var_power=1., eql=False): if link is None: link = L.Log() super(Tweedie, self).__init__( - link=link, variance=V.Power(power=var_power * 1.)) + link=link, + variance=V.Power(power=var_power * 1.), + check_link=check_link + ) def _resid_dev(self, endog, mu): r""" diff --git a/statsmodels/genmod/families/links.py b/statsmodels/genmod/families/links.py --- a/statsmodels/genmod/families/links.py +++ b/statsmodels/genmod/families/links.py @@ -15,6 +15,7 @@ def _link_deprecation_warning(old, new): f"link alias will be removed after the 0.15.0 release.", FutureWarning ) + # raise class Link: @@ -268,10 +269,10 @@ class Power(Link): Notes ----- Aliases of Power: - inverse = Power(power=-1) - sqrt = Power(power=.5) - inverse_squared = Power(power=-2.) - identity = Power(power=1.) + Inverse = Power(power=-1) + Sqrt = Power(power=.5) + InverseSquared = Power(power=-2.) + Identity = Power(power=1.) 
""" def __init__(self, power=1.): diff --git a/statsmodels/genmod/generalized_estimating_equations.py b/statsmodels/genmod/generalized_estimating_equations.py --- a/statsmodels/genmod/generalized_estimating_equations.py +++ b/statsmodels/genmod/generalized_estimating_equations.py @@ -2985,7 +2985,7 @@ class _Multinomial(families.Family): variance = varfuncs.binary safe_links = [_MultinomialLogit, ] - def __init__(self, nlevels): + def __init__(self, nlevels, check_link=True): """ Parameters ---------- @@ -2993,6 +2993,7 @@ def __init__(self, nlevels): The number of distinct categories for the multinomial distribution. """ + self._check_link = check_link self.initialize(nlevels) def initialize(self, nlevels):
diff --git a/statsmodels/discrete/tests/test_predict.py b/statsmodels/discrete/tests/test_predict.py --- a/statsmodels/discrete/tests/test_predict.py +++ b/statsmodels/discrete/tests/test_predict.py @@ -385,7 +385,7 @@ def test_distr(case): if cls_model in models_influ: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=UserWarning) - # ZI models warn about missint hat_matrix_diag + # ZI models warn about missing hat_matrix_diag influ = res.get_influence() influ.summary_frame() @@ -405,7 +405,7 @@ def test_distr(case): try: with warnings.catch_warnings(): - # ZI models warn about missint hat_matrix_diag + # ZI models warn about missing hat_matrix_diag warnings.simplefilter("ignore", category=UserWarning) influ.plot_influence() except ImportError: diff --git a/statsmodels/discrete/tests/test_truncated_model.py b/statsmodels/discrete/tests/test_truncated_model.py --- a/statsmodels/discrete/tests/test_truncated_model.py +++ b/statsmodels/discrete/tests/test_truncated_model.py @@ -1,10 +1,15 @@ +import warnings + import numpy as np from numpy.testing import assert_allclose, assert_equal from statsmodels import datasets from statsmodels.tools.tools import add_constant from statsmodels.tools.testing import Holder +from statsmodels.tools.sm_exceptions import ( + ConvergenceWarning, + ) from statsmodels.distributions.discrete import ( truncatedpoisson, @@ -45,9 +50,12 @@ def test_bic(self): def test_fit_regularized(self): model = self.res1.model - alpha = np.ones(len(self.res1.params)) - res_reg = model.fit_regularized(alpha=alpha*0.01, disp=0) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=ConvergenceWarning) + # This does not catch all Convergence warnings, why? 
+ res_reg = model.fit_regularized(alpha=alpha*0.01, disp=0) assert_allclose(res_reg.params, self.res1.params, rtol=1e-3, atol=5e-3) diff --git a/statsmodels/genmod/families/tests/test_family.py b/statsmodels/genmod/families/tests/test_family.py --- a/statsmodels/genmod/families/tests/test_family.py +++ b/statsmodels/genmod/families/tests/test_family.py @@ -11,6 +11,9 @@ from scipy import integrate from statsmodels.compat.scipy import SP_LT_17 +from statsmodels.tools.sm_exceptions import ( + ValueWarning, + ) import statsmodels.genmod.families as F from statsmodels.genmod.families.family import Tweedie import statsmodels.genmod.families.links as L @@ -56,6 +59,8 @@ def test_invalid_family_link(family, links): "Using default value alpha=1.0.") warnings.filterwarnings("ignore", message=msg, category=UserWarning) + warnings.filterwarnings("ignore", + category=FutureWarning) for link in invalid_links: family(link()) @@ -66,11 +71,26 @@ def test_family_link(family, links): msg = ("Negative binomial dispersion parameter alpha not set. " "Using default value alpha=1.0.") warnings.filterwarnings("ignore", message=msg, - category=UserWarning) + category=ValueWarning) + warnings.filterwarnings("ignore", + category=FutureWarning) for link in links: assert family(link()) [email protected]("family, links", link_cases) +def test_family_link_check(family, links): + # check that we can turn of all link checks + class Hugo(): + pass + with warnings.catch_warnings(): + msg = ("Negative binomial dispersion parameter alpha not set. 
" + "Using default value alpha=1.0.") + warnings.filterwarnings("ignore", message=msg, + category=ValueWarning) + assert family(Hugo(), check_link=False) + + @pytest.mark.skipif(SP_LT_17, reason="Scipy too old, function not available") @pytest.mark.parametrize("power", (1.1, 1.5, 1.9)) def test_tweedie_loglike_obs(power): diff --git a/statsmodels/genmod/families/tests/test_link.py b/statsmodels/genmod/families/tests/test_link.py --- a/statsmodels/genmod/families/tests/test_link.py +++ b/statsmodels/genmod/families/tests/test_link.py @@ -12,14 +12,14 @@ # Family instances links = families.links logit = links.Logit() -inverse_power = links.inverse_power() -sqrt = links.sqrt() -inverse_squared = links.inverse_squared() -identity = links.identity() -log = links.log() -logc = links.logc() -probit = links.probit() -cauchy = links.cauchy() +inverse_power = links.InversePower() +sqrt = links.Sqrt() +inverse_squared = links.InverseSquared() +identity = links.Identity() +log = links.Log() +logc = links.LogC() +probit = links.Probit() +cauchy = links.Cauchy() cloglog = links.CLogLog() loglog = links.LogLog() negbinom = links.NegativeBinomial() @@ -72,7 +72,7 @@ def test_deriv(): for link in Links: for k in range(10): p = np.random.uniform(0, 1) - if isinstance(link, links.cauchy): + if isinstance(link, links.Cauchy): p = np.clip(p, 0.03, 0.97) d = link.deriv(p) da = nd.approx_fprime(np.r_[p], link) @@ -166,15 +166,15 @@ class CasesCDFLink(): # just as namespace to hold cases for test_cdflink link_pairs = [ - (links.CDFLink(dbn=stats.gumbel_l), links.cloglog()), - (links.CDFLink(dbn=stats.gumbel_r), links.loglog()), - (links.CDFLink(dbn=stats.norm), links.probit()), - (links.CDFLink(dbn=stats.logistic), links.logit()), - (links.CDFLink(dbn=stats.t(1)), links.cauchy()), + (links.CDFLink(dbn=stats.gumbel_l), links.CLogLog()), + (links.CDFLink(dbn=stats.gumbel_r), links.LogLog()), + (links.CDFLink(dbn=stats.norm), links.Probit()), + (links.CDFLink(dbn=stats.logistic), 
links.Logit()), + (links.CDFLink(dbn=stats.t(1)), links.Cauchy()), # approximation of t by normal is not good enough for rtol, atol # (links.CDFLink(dbn=stats.t(1000000)), links.probit()), - (MyCLogLog(), links.cloglog()), # not a cdflink, but compares + (MyCLogLog(), links.CLogLog()), # not a cdflink, but compares ] methods = ['__call__', 'deriv', 'inverse', 'inverse_deriv', 'deriv2', diff --git a/statsmodels/genmod/tests/test_gee_glm.py b/statsmodels/genmod/tests/test_gee_glm.py --- a/statsmodels/genmod/tests/test_gee_glm.py +++ b/statsmodels/genmod/tests/test_gee_glm.py @@ -110,7 +110,7 @@ class TestCompareGamma(CheckGEEGLM): def setup_class(cls): # adjusted for Gamma, not in test_gee.py vs = Independence() - family = families.Gamma(link=links.log()) + family = families.Gamma(link=links.Log()) np.random.seed(987126) #Y = np.random.normal(size=100)**2 Y = np.exp(0.1 + np.random.normal(size=100)) # log-normal diff --git a/statsmodels/genmod/tests/test_glm.py b/statsmodels/genmod/tests/test_glm.py --- a/statsmodels/genmod/tests/test_glm.py +++ b/statsmodels/genmod/tests/test_glm.py @@ -427,7 +427,7 @@ def setup_class(cls): 0.001 * np.random.randn(nobs) GaussLog_Model = GLM(cls.lny, cls.X, - family=sm.families.Gaussian(sm.families.links.log())) + family=sm.families.Gaussian(sm.families.links.Log())) cls.res1 = GaussLog_Model.fit() from .results.results_glm import GaussianLog cls.res2 = GaussianLog() @@ -457,7 +457,7 @@ def setup_class(cls): cls.X = np.c_[np.ones((nobs,1)),x,x**2] cls.y_inv = (1. 
+ .02*x + .001*x**2)**-1 + .001 * np.random.randn(nobs) InverseLink_Model = GLM(cls.y_inv, cls.X, - family=sm.families.Gaussian(sm.families.links.inverse_power())) + family=sm.families.Gaussian(sm.families.links.InversePower())) InverseLink_Res = InverseLink_Model.fit() cls.res1 = InverseLink_Res from .results.results_glm import GaussianInverse @@ -683,7 +683,7 @@ def setup_class(cls): from .results.results_glm import CancerLog res2 = CancerLog() cls.res1 = GLM(res2.endog, res2.exog, - family=sm.families.Gamma(link=sm.families.links.log())).fit() + family=sm.families.Gamma(link=sm.families.links.Log())).fit() cls.res2 = res2 # FIXME: enable or delete @@ -709,7 +709,7 @@ def setup_class(cls): res2 = CancerIdentity() with warnings.catch_warnings(): warnings.simplefilter("ignore") - fam = sm.families.Gamma(link=sm.families.links.identity()) + fam = sm.families.Gamma(link=sm.families.links.Identity()) cls.res1 = GLM(res2.endog, res2.exog, family=fam).fit() cls.res2 = res2 @@ -793,7 +793,7 @@ def setup_class(cls): res2 = InvGaussLog() cls.res1 = GLM(res2.endog, res2.exog, family=sm.families.InverseGaussian( - link=sm.families.links.log())).fit() + link=sm.families.links.Log())).fit() cls.res2 = res2 # FIXME: enable or delete @@ -820,7 +820,7 @@ def setup_class(cls): warnings.simplefilter("ignore") cls.res1 = GLM(data.endog, data.exog, family=sm.families.InverseGaussian( - link=sm.families.links.identity())).fit() + link=sm.families.links.Identity())).fit() from .results.results_glm import InvGaussIdentity cls.res2 = InvGaussIdentity() @@ -1098,7 +1098,7 @@ def test_formula_missing_exposure(): 'x': [1, 3, 2, 1.5]} df = pd.DataFrame(d) - family = sm.families.Gaussian(link=sm.families.links.log()) + family = sm.families.Gaussian(link=sm.families.links.Log()) mod = smf.glm("Foo ~ Bar", data=df, exposure=df.exposure, family=family) @@ -1235,12 +1235,12 @@ def test_gradient_irls(): fam = sm.families lnk = sm.families.links - families = [(fam.Binomial, [lnk.logit, lnk.probit, 
lnk.cloglog, lnk.log, lnk.cauchy]), - (fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]), - (fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]), - (fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]), - (fam.InverseGaussian, [lnk.log, lnk.identity, lnk.inverse_power, lnk.inverse_squared]), - (fam.NegativeBinomial, [lnk.log, lnk.inverse_power, lnk.inverse_squared, lnk.identity])] + families = [(fam.Binomial, [lnk.Logit, lnk.Probit, lnk.CLogLog, lnk.Log, lnk.Cauchy]), + (fam.Poisson, [lnk.Log, lnk.Identity, lnk.Sqrt]), + (fam.Gamma, [lnk.Log, lnk.Identity, lnk.InversePower]), + (fam.Gaussian, [lnk.Identity, lnk.Log, lnk.InversePower]), + (fam.InverseGaussian, [lnk.Log, lnk.Identity, lnk.InversePower, lnk.InverseSquared]), + (fam.NegativeBinomial, [lnk.Log, lnk.InversePower, lnk.InverseSquared, lnk.Identity])] n = 100 p = 3 @@ -1255,36 +1255,36 @@ def test_gradient_irls(): if family_class != fam.Binomial and binom_version == 1: continue - if (family_class, link) == (fam.Poisson, lnk.identity): + if (family_class, link) == (fam.Poisson, lnk.Identity): lin_pred = 20 + exog.sum(1) - elif (family_class, link) == (fam.Binomial, lnk.log): + elif (family_class, link) == (fam.Binomial, lnk.Log): lin_pred = -1 + exog.sum(1) / 8 - elif (family_class, link) == (fam.Poisson, lnk.sqrt): + elif (family_class, link) == (fam.Poisson, lnk.Sqrt): lin_pred = 2 + exog.sum(1) - elif (family_class, link) == (fam.InverseGaussian, lnk.log): + elif (family_class, link) == (fam.InverseGaussian, lnk.Log): #skip_zero = True lin_pred = -1 + exog.sum(1) - elif (family_class, link) == (fam.InverseGaussian, lnk.identity): + elif (family_class, link) == (fam.InverseGaussian, lnk.Identity): lin_pred = 20 + 5*exog.sum(1) lin_pred = np.clip(lin_pred, 1e-4, np.inf) - elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_squared): + elif (family_class, link) == (fam.InverseGaussian, lnk.InverseSquared): lin_pred = 0.5 + exog.sum(1) / 5 continue # skip due to non-convergence - elif 
(family_class, link) == (fam.InverseGaussian, lnk.inverse_power): + elif (family_class, link) == (fam.InverseGaussian, lnk.InversePower): lin_pred = 1 + exog.sum(1) / 5 - elif (family_class, link) == (fam.NegativeBinomial, lnk.identity): + elif (family_class, link) == (fam.NegativeBinomial, lnk.Identity): lin_pred = 20 + 5*exog.sum(1) lin_pred = np.clip(lin_pred, 1e-4, np.inf) - elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_squared): + elif (family_class, link) == (fam.NegativeBinomial, lnk.InverseSquared): lin_pred = 0.1 + np.random.uniform(size=exog.shape[0]) continue # skip due to non-convergence - elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_power): + elif (family_class, link) == (fam.NegativeBinomial, lnk.InversePower): lin_pred = 1 + exog.sum(1) / 5 - elif (family_class, link) == (fam.Gaussian, lnk.inverse_power): + elif (family_class, link) == (fam.Gaussian, lnk.InversePower): # adding skip because of convergence failure skip_one = True - # the following fails with identity link, because endog < 0 + # the following fails with Identity link, because endog < 0 # elif family_class == fam.Gamma: # lin_pred = 0.5 * exog.sum(1) + np.random.uniform(size=exog.shape[0]) else: @@ -1297,9 +1297,9 @@ def test_gradient_irls(): mod_irls = sm.GLM(endog, exog, family=family_class(link=link())) rslt_irls = mod_irls.fit(method="IRLS") - if not (family_class, link) in [(fam.Poisson, lnk.sqrt), - (fam.Gamma, lnk.inverse_power), - (fam.InverseGaussian, lnk.identity) + if not (family_class, link) in [(fam.Poisson, lnk.Sqrt), + (fam.Gamma, lnk.InversePower), + (fam.InverseGaussian, lnk.Identity) ]: check_score_hessian(rslt_irls) @@ -1348,16 +1348,16 @@ def test_gradient_irls_eim(): fam = sm.families lnk = sm.families.links - families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log, - lnk.cauchy]), - (fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]), - (fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]), - (fam.Gaussian, 
[lnk.identity, lnk.log, lnk.inverse_power]), - (fam.InverseGaussian, [lnk.log, lnk.identity, - lnk.inverse_power, - lnk.inverse_squared]), - (fam.NegativeBinomial, [lnk.log, lnk.inverse_power, - lnk.inverse_squared, lnk.identity])] + families = [(fam.Binomial, [lnk.Logit, lnk.Probit, lnk.CLogLog, lnk.Log, + lnk.Cauchy]), + (fam.Poisson, [lnk.Log, lnk.Identity, lnk.Sqrt]), + (fam.Gamma, [lnk.Log, lnk.Identity, lnk.InversePower]), + (fam.Gaussian, [lnk.Identity, lnk.Log, lnk.InversePower]), + (fam.InverseGaussian, [lnk.Log, lnk.Identity, + lnk.InversePower, + lnk.InverseSquared]), + (fam.NegativeBinomial, [lnk.Log, lnk.InversePower, + lnk.InverseSquared, lnk.Identity])] n = 100 p = 3 @@ -1372,39 +1372,39 @@ def test_gradient_irls_eim(): if family_class != fam.Binomial and binom_version == 1: continue - if (family_class, link) == (fam.Poisson, lnk.identity): + if (family_class, link) == (fam.Poisson, lnk.Identity): lin_pred = 20 + exog.sum(1) - elif (family_class, link) == (fam.Binomial, lnk.log): + elif (family_class, link) == (fam.Binomial, lnk.Log): lin_pred = -1 + exog.sum(1) / 8 - elif (family_class, link) == (fam.Poisson, lnk.sqrt): + elif (family_class, link) == (fam.Poisson, lnk.Sqrt): lin_pred = 2 + exog.sum(1) - elif (family_class, link) == (fam.InverseGaussian, lnk.log): + elif (family_class, link) == (fam.InverseGaussian, lnk.Log): # skip_zero = True lin_pred = -1 + exog.sum(1) elif (family_class, link) == (fam.InverseGaussian, - lnk.identity): + lnk.Identity): lin_pred = 20 + 5*exog.sum(1) lin_pred = np.clip(lin_pred, 1e-4, np.inf) elif (family_class, link) == (fam.InverseGaussian, - lnk.inverse_squared): + lnk.InverseSquared): lin_pred = 0.5 + exog.sum(1) / 5 continue # skip due to non-convergence elif (family_class, link) == (fam.InverseGaussian, - lnk.inverse_power): + lnk.InversePower): lin_pred = 1 + exog.sum(1) / 5 elif (family_class, link) == (fam.NegativeBinomial, - lnk.identity): + lnk.Identity): lin_pred = 20 + 5*exog.sum(1) lin_pred = 
np.clip(lin_pred, 1e-4, np.inf) elif (family_class, link) == (fam.NegativeBinomial, - lnk.inverse_squared): + lnk.InverseSquared): lin_pred = 0.1 + np.random.uniform(size=exog.shape[0]) continue # skip due to non-convergence elif (family_class, link) == (fam.NegativeBinomial, - lnk.inverse_power): + lnk.InversePower): lin_pred = 1 + exog.sum(1) / 5 - elif (family_class, link) == (fam.Gaussian, lnk.inverse_power): + elif (family_class, link) == (fam.Gaussian, lnk.InversePower): # adding skip because of convergence failure skip_one = True else: @@ -1701,7 +1701,7 @@ def setup_class(cls): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=DomainWarning) family_link = sm.families.NegativeBinomial( - link=sm.families.links.nbinom(alpha=alpha), + link=sm.families.links.NegativeBinomial(alpha=alpha), alpha=alpha) cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight, @@ -1718,7 +1718,7 @@ def setup_class(cls): Tests Gamma family with log link. ''' super(TestWtdGlmGamma, cls).setup_class() - family_link = sm.families.Gamma(sm.families.links.log()) + family_link = sm.families.Gamma(sm.families.links.Log()) cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight, family=family_link).fit() @@ -1733,7 +1733,7 @@ def setup_class(cls): Tests Gaussian family with log link. ''' super(TestWtdGlmGaussian, cls).setup_class() - family_link = sm.families.Gaussian(sm.families.links.log()) + family_link = sm.families.Gaussian(sm.families.links.Log()) cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight, family=family_link).fit() @@ -1748,7 +1748,7 @@ def setup_class(cls): Tests InverseGaussian family with log link. 
''' super(TestWtdGlmInverseGaussian, cls).setup_class() - family_link = sm.families.InverseGaussian(sm.families.links.log()) + family_link = sm.families.InverseGaussian(sm.families.links.Log()) cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight, family=family_link).fit() @@ -1763,7 +1763,7 @@ def setup_class(cls): Tests Gamma family with log link. ''' super(TestWtdGlmGammaNewton, cls).setup_class() - family_link = sm.families.Gamma(sm.families.links.log()) + family_link = sm.families.Gamma(sm.families.links.Log()) cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight, family=family_link @@ -1773,7 +1773,7 @@ def setup_class(cls): ).fit(method='newton') def test_init_kwargs(self): - family_link = sm.families.Gamma(sm.families.links.log()) + family_link = sm.families.Gamma(sm.families.links.Log()) with pytest.warns(ValueWarning, match="unknown kwargs"): GLM(self.endog, self.exog, family=family_link, @@ -1788,7 +1788,7 @@ def setup_class(cls): Tests Gamma family with log link. ''' super(TestWtdGlmGammaScale_X2, cls).setup_class() - family_link = sm.families.Gamma(sm.families.links.log()) + family_link = sm.families.Gamma(sm.families.links.Log()) cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight, family=family_link, @@ -1805,7 +1805,7 @@ def setup_class(cls): Tests Gamma family with log link. ''' super(TestWtdGlmGammaScale_dev, cls).setup_class() - family_link = sm.families.Gamma(sm.families.links.log()) + family_link = sm.families.Gamma(sm.families.links.Log()) cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight, family=family_link, @@ -1837,7 +1837,7 @@ def setup_class(cls): Tests Tweedie family with log link and var_power=1. 
''' super(TestWtdTweedieLog, cls).setup_class() - family_link = sm.families.Tweedie(link=sm.families.links.log(), + family_link = sm.families.Tweedie(link=sm.families.links.Log(), var_power=1) cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight, @@ -2000,7 +2000,7 @@ def setup_class(cls): cls.data = cpunish.load_pandas() cls.exog = cls.data.exog[['INCOME', 'SOUTH']] cls.endog = cls.data.endog - family_link = sm.families.Tweedie(link=sm.families.links.log(), + family_link = sm.families.Tweedie(link=sm.families.links.Log(), var_power=1.) cls.res1 = sm.GLM(endog=cls.data.endog, exog=cls.data.exog[['INCOME', 'SOUTH']], @@ -2015,7 +2015,7 @@ def setup_class(cls): from .results.results_glm import FairTweedieLog15 data = load_pandas() - family_link = sm.families.Tweedie(link=sm.families.links.log(), + family_link = sm.families.Tweedie(link=sm.families.links.Log(), var_power=1.5) cls.res1 = sm.GLM(endog=data.endog, exog=data.exog[['rate_marriage', 'age', @@ -2048,11 +2048,11 @@ def setup_class(cls): cls.data = cpunish.load_pandas() cls.exog = cls.data.exog[['INCOME', 'SOUTH']] cls.endog = cls.data.endog - family1 = sm.families.Gaussian(link=sm.families.links.log()) + family1 = sm.families.Gaussian(link=sm.families.links.Log()) cls.res1 = sm.GLM(endog=cls.data.endog, exog=cls.data.exog[['INCOME', 'SOUTH']], family=family1).fit() - family2 = sm.families.Tweedie(link=sm.families.links.log(), + family2 = sm.families.Tweedie(link=sm.families.links.Log(), var_power=0) cls.res2 = sm.GLM(endog=cls.data.endog, exog=cls.data.exog[['INCOME', 'SOUTH']], @@ -2065,11 +2065,11 @@ def setup_class(cls): cls.data = cpunish.load_pandas() cls.exog = cls.data.exog[['INCOME', 'SOUTH']] cls.endog = cls.data.endog - family1 = sm.families.Poisson(link=sm.families.links.log()) + family1 = sm.families.Poisson(link=sm.families.links.Log()) cls.res1 = sm.GLM(endog=cls.data.endog, exog=cls.data.exog[['INCOME', 'SOUTH']], family=family1).fit() - family2 = 
sm.families.Tweedie(link=sm.families.links.log(), + family2 = sm.families.Tweedie(link=sm.families.links.Log(), var_power=1) cls.res2 = sm.GLM(endog=cls.data.endog, exog=cls.data.exog[['INCOME', 'SOUTH']], @@ -2082,11 +2082,11 @@ def setup_class(cls): cls.data = cpunish.load_pandas() cls.exog = cls.data.exog[['INCOME', 'SOUTH']] cls.endog = cls.data.endog - family1 = sm.families.Gamma(link=sm.families.links.log()) + family1 = sm.families.Gamma(link=sm.families.links.Log()) cls.res1 = sm.GLM(endog=cls.data.endog, exog=cls.data.exog[['INCOME', 'SOUTH']], family=family1).fit() - family2 = sm.families.Tweedie(link=sm.families.links.log(), + family2 = sm.families.Tweedie(link=sm.families.links.Log(), var_power=2) cls.res2 = sm.GLM(endog=cls.data.endog, exog=cls.data.exog[['INCOME', 'SOUTH']], @@ -2099,11 +2099,11 @@ def setup_class(cls): cls.data = cpunish.load_pandas() cls.exog = cls.data.exog[['INCOME', 'SOUTH']] cls.endog = cls.data.endog - family1 = sm.families.InverseGaussian(link=sm.families.links.log()) + family1 = sm.families.InverseGaussian(link=sm.families.links.Log()) cls.res1 = sm.GLM(endog=cls.data.endog, exog=cls.data.exog[['INCOME', 'SOUTH']], family=family1).fit() - family2 = sm.families.Tweedie(link=sm.families.links.log(), + family2 = sm.families.Tweedie(link=sm.families.links.Log(), var_power=3) cls.res2 = sm.GLM(endog=cls.data.endog, exog=cls.data.exog[['INCOME', 'SOUTH']], @@ -2278,12 +2278,12 @@ def testTweediePowerEstimate(): 1.56354040e-09, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00] model1 = sm.GLM(y, data.exog[['INCOME', 'SOUTH']], - family=sm.families.Tweedie(link=sm.families.links.log(), + family=sm.families.Tweedie(link=sm.families.links.Log(), var_power=1.5)) res1 = model1.fit() model2 = sm.GLM((y - res1.mu) ** 2, np.column_stack((np.ones(len(res1.mu)), np.log(res1.mu))), - family=sm.families.Gamma(sm.families.links.log())) + family=sm.families.Gamma(sm.families.links.Log())) res2 = model2.fit() # Sample may be too small 
for this... # assert_allclose(res1.scale, np.exp(res2.params[0]), rtol=0.25) diff --git a/statsmodels/genmod/tests/test_glm_weights.py b/statsmodels/genmod/tests/test_glm_weights.py --- a/statsmodels/genmod/tests/test_glm_weights.py +++ b/statsmodels/genmod/tests/test_glm_weights.py @@ -305,7 +305,7 @@ def setup_class(cls): data=data, family=sm.families.Tweedie( var_power=1.55, - link=sm.families.links.log() + link=sm.families.links.Log() ), var_weights=aweights ) @@ -326,7 +326,7 @@ def setup_class(cls): aweights[::5] = 5 aweights[::13] = 3 model = sm.GLM(endog, exog, - family=sm.families.Gamma(link=sm.families.links.log()), + family=sm.families.Gamma(link=sm.families.links.Log()), var_weights=aweights) cls.res1 = model.fit(rtol=1e-25, atol=0) cls.res2 = res_r.results_gamma_aweights_nonrobust @@ -355,7 +355,7 @@ def setup_class(cls): model = smf.glm( 'EXECUTIONS ~ INCOME + SOUTH - 1', data=data, - family=sm.families.Gaussian(link=sm.families.links.log()), + family=sm.families.Gaussian(link=sm.families.links.Log()), var_weights=aweights ) cls.res1 = model.fit(rtol=1e-25, atol=0) @@ -432,16 +432,16 @@ def test_wtd_gradient_irls(): fam = sm.families lnk = sm.families.links - families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log, - lnk.cauchy]), - (fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]), - (fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]), - (fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]), - (fam.InverseGaussian, [lnk.log, lnk.identity, - lnk.inverse_power, - lnk.inverse_squared]), - (fam.NegativeBinomial, [lnk.log, lnk.inverse_power, - lnk.inverse_squared, lnk.identity])] + families = [(fam.Binomial, [lnk.Logit, lnk.Probit, lnk.CLogLog, lnk.Log, + lnk.Cauchy]), + (fam.Poisson, [lnk.Log, lnk.Identity, lnk.Sqrt]), + (fam.Gamma, [lnk.Log, lnk.Identity, lnk.InversePower]), + (fam.Gaussian, [lnk.Identity, lnk.Log, lnk.InversePower]), + (fam.InverseGaussian, [lnk.Log, lnk.Identity, + lnk.InversePower, + lnk.InverseSquared]), + 
(fam.NegativeBinomial, [lnk.Log, lnk.InversePower, + lnk.InverseSquared, lnk.Identity])] n = 100 p = 3 @@ -456,67 +456,67 @@ def test_wtd_gradient_irls(): if family_class != fam.Binomial and binom_version == 1: continue - elif family_class == fam.Binomial and link == lnk.cloglog: + elif family_class == fam.Binomial and link == lnk.CLogLog: # Cannot get gradient to converage with var_weights here continue - elif family_class == fam.Binomial and link == lnk.log: + elif family_class == fam.Binomial and link == lnk.Log: # Cannot get gradient to converage with var_weights here continue - elif (family_class, link) == (fam.Poisson, lnk.identity): + elif (family_class, link) == (fam.Poisson, lnk.Identity): lin_pred = 20 + exog.sum(1) - elif (family_class, link) == (fam.Binomial, lnk.log): + elif (family_class, link) == (fam.Binomial, lnk.Log): lin_pred = -1 + exog.sum(1) / 8 - elif (family_class, link) == (fam.Poisson, lnk.sqrt): + elif (family_class, link) == (fam.Poisson, lnk.Sqrt): lin_pred = -2 + exog.sum(1) - elif (family_class, link) == (fam.Gamma, lnk.log): + elif (family_class, link) == (fam.Gamma, lnk.Log): # Cannot get gradient to converge with var_weights here continue - elif (family_class, link) == (fam.Gamma, lnk.identity): + elif (family_class, link) == (fam.Gamma, lnk.Identity): # Cannot get gradient to converage with var_weights here continue - elif (family_class, link) == (fam.Gamma, lnk.inverse_power): + elif (family_class, link) == (fam.Gamma, lnk.InversePower): # Cannot get gradient to converage with var_weights here continue - elif (family_class, link) == (fam.Gaussian, lnk.log): + elif (family_class, link) == (fam.Gaussian, lnk.Log): # Cannot get gradient to converage with var_weights here continue - elif (family_class, link) == (fam.Gaussian, lnk.inverse_power): + elif (family_class, link) == (fam.Gaussian, lnk.InversePower): # Cannot get gradient to converage with var_weights here continue - elif (family_class, link) == (fam.InverseGaussian, 
lnk.log): + elif (family_class, link) == (fam.InverseGaussian, lnk.Log): # Cannot get gradient to converage with var_weights here lin_pred = -1 + exog.sum(1) continue elif (family_class, link) == (fam.InverseGaussian, - lnk.identity): + lnk.Identity): # Cannot get gradient to converage with var_weights here lin_pred = 20 + 5*exog.sum(1) lin_pred = np.clip(lin_pred, 1e-4, np.inf) continue elif (family_class, link) == (fam.InverseGaussian, - lnk.inverse_squared): + lnk.InverseSquared): lin_pred = 0.5 + exog.sum(1) / 5 continue # skip due to non-convergence elif (family_class, link) == (fam.InverseGaussian, - lnk.inverse_power): + lnk.InversePower): lin_pred = 1 + exog.sum(1) / 5 method = 'newton' elif (family_class, link) == (fam.NegativeBinomial, - lnk.identity): + lnk.Identity): lin_pred = 20 + 5*exog.sum(1) lin_pred = np.clip(lin_pred, 1e-3, np.inf) method = 'newton' elif (family_class, link) == (fam.NegativeBinomial, - lnk.inverse_squared): + lnk.InverseSquared): lin_pred = 0.1 + np.random.uniform(size=exog.shape[0]) continue # skip due to non-convergence elif (family_class, link) == (fam.NegativeBinomial, - lnk.inverse_power): + lnk.InversePower): # Cannot get gradient to converage with var_weights here lin_pred = 1 + exog.sum(1) / 5 continue - elif (family_class, link) == (fam.Gaussian, lnk.inverse_power): + elif (family_class, link) == (fam.Gaussian, lnk.InversePower): # adding skip because of convergence failure skip_one = True else: @@ -601,7 +601,7 @@ def setup_class(cls): beta = np.array([-1, 0.1, -0.05, .2, 0.35]) lin_pred = (exog * beta).sum(axis=1) family = sm.families.Poisson - link = sm.families.links.log + link = sm.families.links.Log endog = gen_endog(lin_pred, family, link) mod1 = sm.GLM(endog, exog, family=family(link=link())) cls.res1 = mod1.fit() @@ -632,7 +632,7 @@ def setup_class(cls): beta = np.array([-1, 0.1, -0.05, .2, 0.35]) lin_pred = (exog * beta).sum(axis=1) family = sm.families.Poisson - link = sm.families.links.log + link = 
sm.families.links.Log endog = gen_endog(lin_pred, family, link) mod1 = sm.GLM(endog, exog, family=family(link=link())) cls.res1 = mod1.fit() @@ -663,7 +663,7 @@ def setup_class(cls): beta = np.array([7, 0.1, -0.05, .2, 0.35]) lin_pred = (exog * beta).sum(axis=1) family = sm.families.Tweedie - link = sm.families.links.log + link = sm.families.links.Log endog = gen_endog(lin_pred, family, link) mod1 = sm.GLM(endog, exog, family=family(link=link(), var_power=1.5)) cls.res1 = mod1.fit(rtol=1e-20, atol=0, tol_criterion='params') @@ -695,7 +695,7 @@ def setup_class(cls): beta = np.array([7, 0.1, -0.05, .2, 0.35]) lin_pred = (exog * beta).sum(axis=1) family = sm.families.Tweedie - link = sm.families.links.log + link = sm.families.links.Log endog = gen_endog(lin_pred, family, link) mod1 = sm.GLM(endog, exog, family=family(link=link(), var_power=1.5)) cls.res1 = mod1.fit(rtol=1e-10, atol=0, tol_criterion='params', @@ -728,7 +728,7 @@ def setup_class(cls): beta = np.array([-1, 0.1, -0.05, .2, 0.35]) lin_pred = (exog * beta).sum(axis=1) family = sm.families.Binomial - link = sm.families.links.logit + link = sm.families.links.Logit endog = gen_endog(lin_pred, family, link, binom_version=0) mod1 = sm.GLM(endog, exog, family=family(link=link())) cls.res1 = mod1.fit(rtol=1e-10, atol=0, tol_criterion='params', @@ -761,7 +761,7 @@ def setup_class(cls): beta = np.array([-1, 0.1, -0.05, .2, 0.35]) lin_pred = (exog * beta).sum(axis=1) family = sm.families.Binomial - link = sm.families.links.logit + link = sm.families.links.Logit endog = gen_endog(lin_pred, family, link, binom_version=0) wt = np.random.randint(1, 5, n) mod1 = sm.GLM(endog, exog, family=family(link=link()), freq_weights=wt) @@ -855,7 +855,7 @@ def setup_class(cls): model = smf.glm( 'EXECUTIONS ~ INCOME + SOUTH - 1', data=data, - family=sm.families.Gaussian(link=sm.families.links.identity()), + family=sm.families.Gaussian(link=sm.families.links.Identity()), var_weights=aweights ) wlsmodel = smf.wls(
ENH: family: add option to turn of valid link check, or warn. see #1509 add option: `invalid_link : string in ['raise' 'warn', 'ignore']` to turn exception into warning or ignore needed to allow the GLM equivalent for Probit in #1870 #1509 also includes other options to improve how links are specified, that are not addressed here. Also, binomial should allow probit and CDFLink, which are also not included here.
@kshedden This will also change which links can be used in GEE by turning off the exception. test failure in GEE Multinomial (I only ran test_glm.py locally) ``` ====================================================================== ERROR: statsmodels.genmod.tests.test_gee.TestGEE.test_nominal ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/nose/case.py", line 197, in runTest self.test(*self.arg) File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/genmod/tests/test_gee.py", line 522, in test_nominal family = Multinomial(3) File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/genmod/generalized_estimating_equations.py", line 1946, in __init__ self.link = MultinomialLogit(self.ncut) File "/home/travis/miniconda/envs/statsmodels-test/lib/python2.7/site-packages/statsmodels-0.6.0-py2.7-linux-x86_64.egg/statsmodels/genmod/families/family.py", line 59, in _setlink invalid_link = self.invalid_link AttributeError: 'Multinomial' object has no attribute 'invalid_link' ``` [![Coverage Status](https://coveralls.io/builds/1067981/badge)](https://coveralls.io/builds/1067981) Coverage decreased (-0.02%) when pulling **723bdb8a51065257a101f01754bb124bfbc031c6 on josef-pkt:REF_genmod_link_check** into **2e806fc4203d528a17307f4fb08ab8bfe7ee43dd on statsmodels:master**. about naming: `invalid_link` doesn't indicate that it is a flag, #2212 has `safe_links` as a list of links which sounds similar aside: CDFLink is valid for Binomial family because CDFLink is a subclass of Logit, so isinstance with Logit is true. trying to rebase this too many merge conflict, all `__init__` `__init__` was refactored in main since this PR was opened. 
AFAICS, it will be faster starting a new PR from scratch instead of editing all merge conflicts. The only planned change is to use `check_link` as keyword and flag.
"2022-12-08T22:15:25Z"
0.13
[ "statsmodels/genmod/tests/test_gee_glm.py::TestComparePoisson::test_resid", "statsmodels/genmod/tests/test_gee_glm.py::TestComparePoisson::test_basic", "statsmodels/genmod/tests/test_gee_glm.py::TestCompareLogit::test_resid", "statsmodels/genmod/tests/test_gee_glm.py::TestCompareLogit::test_basic", "statsmodels/genmod/tests/test_gee_glm.py::TestCompareGaussian::test_resid", "statsmodels/genmod/tests/test_gee_glm.py::TestCompareGaussian::test_basic", "statsmodels/genmod/tests/test_gee_glm.py::TestCompareGamma::test_basic", "statsmodels/genmod/tests/test_gee_glm.py::TestCompareGamma::test_resid", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_aic", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestRegularized::test_regularized", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_get_distribution", 
"statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_bic", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_get_prediction", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_loglike", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_mu", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_fittedvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_tpvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog_formula", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution_binom_count", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_endog_dtype", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_compare_OLS", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_scale", 
"statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_get_prediction", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_r", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_prsquared", 
"statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_init_kwargs", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_params", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_fittedvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_bic", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_mu", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_prsquared", 
"statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary2", "statsmodels/genmod/tests/test_glm.py::test_score_test_ols", "statsmodels/genmod/tests/test_glm.py::test_perfect_pred", "statsmodels/genmod/tests/test_glm.py::test_attribute_writable_resettable", "statsmodels/genmod/tests/test_glm.py::test_glm_start_params", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls_eim", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls", "statsmodels/genmod/tests/test_glm.py::test_summary", "statsmodels/genmod/tests/test_glm.py::test_formula_missing_exposure", 
"statsmodels/genmod/tests/test_glm.py::test_glm_irls_method", "statsmodels/genmod/tests/test_glm.py::test_loglike_no_opt", "statsmodels/genmod/tests/test_glm.py::test_plots", "statsmodels/genmod/tests/test_glm.py::test_wtd_patsy_missing", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL", "statsmodels/genmod/tests/test_glm.py::test_tweedie_elastic_net", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_poisson_limit", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_upper_limit", "statsmodels/genmod/tests/test_glm.py::testTweediePowerEstimate", "statsmodels/genmod/tests/test_glm.py::test_glm_lasso_6431", "statsmodels/genmod/tests/test_glm.py::test_int_scale", "statsmodels/genmod/tests/test_glm.py::test_glm_bic", "statsmodels/genmod/tests/test_glm.py::test_poisson_deviance", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int32]", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int16]", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int8]", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int64]", "statsmodels/genmod/tests/test_glm.py::test_glm_bic_warning", "statsmodels/genmod/tests/test_glm.py::test_output_exposure_null", "statsmodels/genmod/tests/test_glm.py::test_non_invertible_hessian_fails_summary", "statsmodels/genmod/tests/test_glm.py::test_qaic", "statsmodels/genmod/tests/test_glm.py::test_tweedie_score", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_R", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_scale", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_bic", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_params", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_pearson_chi2", 
"statsmodels/genmod/tests/test_glm.py::TestStartParams::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_missing", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_predict", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_offset_exposure", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_missing", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_fittedvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_mu", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_standard_errors", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_get_distribution", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_bic", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_residuals", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_fittedvalues", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link15-link25-inverse_deriv]", "statsmodels/genmod/families/tests/test_link.py::test_invlogit_stability", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link14-link24-inverse]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link12-link22-deriv]", "statsmodels/genmod/families/tests/test_link.py::test_inverse", "statsmodels/genmod/families/tests/test_link.py::test_deriv", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link15-link25-__call__]", "statsmodels/genmod/families/tests/test_link.py::test_inverse_deriv", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link13-link23-deriv]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link11-link21-__call__]", 
"statsmodels/genmod/families/tests/test_link.py::test_cdflink[link14-link24-inverse_deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link10-link20-deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_inverse_deriv2", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link15-link25-inverse]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link14-link24-__call__]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link12-link22-__call__]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link11-link21-deriv]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link11-link21-deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link14-link24-deriv]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link13-link23-inverse_deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link15-link25-deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link15-link25-inverse_deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link12-link22-deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link11-link21-inverse]", "statsmodels/genmod/families/tests/test_link.py::test_deriv2", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link13-link23-inverse_deriv]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link13-link23-deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link10-link20-__call__]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link12-link22-inverse]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link10-link20-inverse]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link10-link20-inverse_deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link13-link23-inverse]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link11-link21-inverse_deriv]", 
"statsmodels/genmod/families/tests/test_link.py::test_cdflink[link10-link20-inverse_deriv]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link12-link22-inverse_deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link13-link23-__call__]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link11-link21-inverse_deriv2]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link10-link20-deriv]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link14-link24-inverse_deriv]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link15-link25-deriv]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link12-link22-inverse_deriv]", "statsmodels/genmod/families/tests/test_link.py::test_cdflink[link14-link24-deriv2]", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedNegBinSt::test_predict", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedNegBinSt::test_basic", "statsmodels/discrete/tests/test_truncated_model.py::TestHurdleNegbinSimulated::test_basic", "statsmodels/discrete/tests/test_truncated_model.py::TestHurdleNegbinSimulated::test_predict", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedNBPModel::test_params", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedNBPModel::test_fit_regularized", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedNBPModel::test_llf", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedNBPModel::test_aic", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedNBPModel::test_bic", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedNBPModel::test_bse", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedNBPModel::test_conf_int", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoissonModel::test_fit_regularized", 
"statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoissonModel::test_llf", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoissonModel::test_aic", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoissonModel::test_params", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoissonModel::test_bse", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoissonModel::test_bic", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoissonModel::test_conf_int", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoisson1St::test_predict", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoisson1St::test_basic", "statsmodels/discrete/tests/test_truncated_model.py::TestHurdlePoissonR::test_predict", "statsmodels/discrete/tests/test_truncated_model.py::TestHurdlePoissonR::test_basic", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoissonSt::test_basic", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoissonSt::test_predict", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoisson_predict::test_mean", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoisson_predict::test_predict_prob", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedLFPoisson_predict::test_var", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedLFPoissonModel::test_params", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedLFPoissonModel::test_bic", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedLFPoissonModel::test_conf_int", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedLFPoissonModel::test_bse", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedLFPoissonModel::test_llf", 
"statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedLFPoissonModel::test_fit_regularized", "statsmodels/discrete/tests/test_truncated_model.py::TestZeroTruncatedLFPoissonModel::test_aic", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedNBP_predict::test_predict_prob", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedNBP_predict::test_var", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedNBP_predict::test_mean", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedNegBin1St::test_predict", "statsmodels/discrete/tests/test_truncated_model.py::TestTruncatedNegBin1St::test_basic", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_score_test_alpha", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_predict", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_influence", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_score_test", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_diagnostic", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_basic", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_predict_linear", "statsmodels/discrete/tests/test_predict.py::test_distr[case12]", "statsmodels/discrete/tests/test_predict.py::test_distr[case11]", "statsmodels/discrete/tests/test_predict.py::test_distr[case14]", "statsmodels/discrete/tests/test_predict.py::test_distr[case9]", "statsmodels/discrete/tests/test_predict.py::test_distr[case10]", "statsmodels/discrete/tests/test_predict.py::test_distr[case7]", "statsmodels/discrete/tests/test_predict.py::test_distr[case3]", "statsmodels/discrete/tests/test_predict.py::test_distr[case1]", "statsmodels/discrete/tests/test_predict.py::test_distr[case4]", "statsmodels/discrete/tests/test_predict.py::test_distr[case5]", 
"statsmodels/discrete/tests/test_predict.py::test_distr[case2]", "statsmodels/discrete/tests/test_predict.py::test_distr[case13]", "statsmodels/discrete/tests/test_predict.py::test_distr[case8]", "statsmodels/discrete/tests/test_predict.py::test_distr[case6]", "statsmodels/discrete/tests/test_predict.py::test_distr[case0]", "statsmodels/discrete/tests/test_predict.py::TestZINegativeBinomialPPredict::test_predict", "statsmodels/discrete/tests/test_predict.py::TestZINegativeBinomialPPredict::test_basic", "statsmodels/discrete/tests/test_predict.py::TestZINegativeBinomialPPredict::test_diagnostic", "statsmodels/discrete/tests/test_predict.py::TestGeneralizedPoissonPredict::test_influence", "statsmodels/discrete/tests/test_predict.py::TestGeneralizedPoissonPredict::test_score_test_alpha", "statsmodels/discrete/tests/test_predict.py::TestGeneralizedPoissonPredict::test_score_test", "statsmodels/discrete/tests/test_predict.py::TestGeneralizedPoissonPredict::test_predict_linear", "statsmodels/genmod/families/tests/test_family.py::test_family_link[Gamma-links2]", "statsmodels/genmod/families/tests/test_family.py::test_family_link[Tweedie-links6]", "statsmodels/genmod/families/tests/test_family.py::test_invalid_family_link[Gaussian-links1]", "statsmodels/genmod/families/tests/test_family.py::test_invalid_family_link[Poisson-links0]", "statsmodels/genmod/families/tests/test_family.py::test_family_link[NegativeBinomial-links5]", "statsmodels/genmod/families/tests/test_family.py::test_family_link[InverseGaussian-links4]", "statsmodels/genmod/families/tests/test_family.py::test_family_link[Poisson-links0]", "statsmodels/genmod/families/tests/test_family.py::test_invalid_family_link[Tweedie-links6]", "statsmodels/genmod/families/tests/test_family.py::test_family_link[Gaussian-links1]", "statsmodels/genmod/families/tests/test_family.py::test_family_link[Binomial-links3]", "statsmodels/genmod/families/tests/test_family.py::test_invalid_family_link[InverseGaussian-links4]", 
"statsmodels/genmod/families/tests/test_family.py::test_tweedie_loglike_obs[1.9]", "statsmodels/genmod/families/tests/test_family.py::test_tweedie_loglike_obs[1.1]", "statsmodels/genmod/families/tests/test_family.py::test_tweedie_loglike_obs[1.5]", "statsmodels/genmod/families/tests/test_family.py::test_invalid_family_link[Binomial-links3]", "statsmodels/genmod/families/tests/test_family.py::test_invalid_family_link[NegativeBinomial-links5]", "statsmodels/genmod/families/tests/test_family.py::test_invalid_family_link[Gamma-links2]", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwClu::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwClu::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwClu::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwClu::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwClu::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsDuplicated::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsDuplicated::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsDuplicated::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsDuplicated::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsDuplicated::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwNr::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwNr::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwNr::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwNr::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwNr::test_compare_optimizers", 
"statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAverage::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAverage::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAverage::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAverage::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAverage::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianWLS::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianWLS::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianWLS::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianWLS::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianWLS::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGammaAwNr::test_r_llf", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGammaAwNr::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGammaAwNr::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGammaAwNr::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGammaAwNr::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGammaAwNr::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAverage::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAverage::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAverage::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAverage::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAverage::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianAwNr::test_r_llf", 
"statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianAwNr::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianAwNr::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianAwNr::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianAwNr::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmGaussianAwNr::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmTweedieAwNr::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmTweedieAwNr::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmTweedieAwNr::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmTweedieAwNr::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmTweedieAwNr::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAggregated::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAggregated::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAggregated::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAggregated::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestRepeatedvsAggregated::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::test_wtd_gradient_irls", "statsmodels/genmod/tests/test_glm_weights.py::test_weights_different_formats[list]", "statsmodels/genmod/tests/test_glm_weights.py::test_weights_different_formats[ndarray]", "statsmodels/genmod/tests/test_glm_weights.py::test_warnings_raised", "statsmodels/genmod/tests/test_glm_weights.py::test_weights_different_formats[Series]", "statsmodels/genmod/tests/test_glm_weights.py::test_poisson_residuals", "statsmodels/genmod/tests/test_glm_weights.py::test_incompatible_input", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonPlain::test_compare_optimizers", 
"statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonPlain::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonPlain::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonPlain::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonPlain::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonPwNr::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonPwNr::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonPwNr::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsAverage::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsAverage::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsAverage::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsAverage::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomial0RepeatedvsAverage::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwNr::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwNr::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwNr::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwNr::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwNr::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwHC::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwHC::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwHC::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwHC::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonAwHC::test_compare_optimizers", 
"statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAggregated::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAggregated::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAggregated::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAggregated::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestTweedieRepeatedvsAggregated::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomialVsVarWeights::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomialVsVarWeights::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomialVsVarWeights::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomialVsVarWeights::test_compare_optimizers", "statsmodels/genmod/tests/test_glm_weights.py::TestBinomialVsVarWeights::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwHC::test_pearson_chi2", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwHC::test_residuals", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwHC::test_basic", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwHC::test_getprediction", "statsmodels/genmod/tests/test_glm_weights.py::TestGlmPoissonFwHC::test_compare_optimizers" ]
[ "statsmodels/genmod/families/tests/test_family.py::test_family_link_check[Binomial-links3]", "statsmodels/genmod/families/tests/test_family.py::test_family_link_check[Gaussian-links1]", "statsmodels/genmod/families/tests/test_family.py::test_family_link_check[Gamma-links2]", "statsmodels/genmod/families/tests/test_family.py::test_family_link_check[InverseGaussian-links4]", "statsmodels/genmod/families/tests/test_family.py::test_family_link_check[Tweedie-links6]", "statsmodels/genmod/families/tests/test_family.py::test_family_link_check[NegativeBinomial-links5]", "statsmodels/genmod/families/tests/test_family.py::test_family_link_check[Poisson-links0]" ]
Python
[]
[]
statsmodels/statsmodels
8,623
statsmodels__statsmodels-8623
[ "8619" ]
74bd3d6af477d396c54c82e525837156ca1a7ada
diff --git a/statsmodels/stats/multitest.py b/statsmodels/stats/multitest.py --- a/statsmodels/stats/multitest.py +++ b/statsmodels/stats/multitest.py @@ -60,7 +60,9 @@ def _ecdf(x): for a in m[1:]: multitest_alias[a] = m[0] -def multipletests(pvals, alpha=0.05, method='hs', is_sorted=False, +def multipletests(pvals, alpha=0.05, method='hs', + maxiter=1, + is_sorted=False, returnsorted=False): """ Test results and p-value correction for multiple tests @@ -86,6 +88,13 @@ def multipletests(pvals, alpha=0.05, method='hs', is_sorted=False, - `fdr_tsbh` : two stage fdr correction (non-negative) - `fdr_tsbky` : two stage fdr correction (non-negative) + maxiter : int or bool + Maximum number of iterations for two-stage fdr, `fdr_tsbh` and + `fdr_tsbky`. It is ignored by all other methods. + maxiter=1 (default) corresponds to the two stage method. + maxiter=-1 corresponds to full iterations which is maxiter=len(pvals). + maxiter=0 uses only a single stage fdr correction using a 'bh' or 'bky' + prior fraction of assumed true hypotheses. is_sorted : bool If False (default), the p_values will be sorted, but the corrected pvalues are in the original order. 
If True, then it assumed that the @@ -228,11 +237,13 @@ def multipletests(pvals, alpha=0.05, method='hs', is_sorted=False, # delegate, call with sorted pvals reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha, method='bky', + maxiter=maxiter, is_sorted=True)[:2] elif method.lower() in ['fdr_tsbh', 'fdr_2sbh']: # delegate, call with sorted pvals reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha, method='bh', + maxiter=maxiter, is_sorted=True)[:2] elif method.lower() in ['fdr_gbs']: @@ -356,7 +367,9 @@ def fdrcorrection(pvals, alpha=0.05, method='indep', is_sorted=False): return reject, pvals_corrected -def fdrcorrection_twostage(pvals, alpha=0.05, method='bky', iter=False, +def fdrcorrection_twostage(pvals, alpha=0.05, method='bky', + maxiter=1, + iter=None, is_sorted=False): '''(iterated) two stage linear step-up procedure with estimation of number of true hypotheses @@ -376,7 +389,23 @@ def fdrcorrection_twostage(pvals, alpha=0.05, method='bky', iter=False, and Yekuteli 2006 * 'bh' - the two stage method of Benjamini and Hochberg + maxiter : int or bool + Maximum number of iterations. + maxiter=1 (default) corresponds to the two stage method. + maxiter=-1 corresponds to full iterations which is maxiter=len(pvals). + maxiter=0 uses only a single stage fdr correction using a 'bh' or 'bky' + prior fraction of assumed true hypotheses. + Boolean maxiter is allowed for backwards compatibility with the + deprecated ``iter`` keyword. + maxiter=False is two-stage fdr (maxiter=1) + maxiter=True is full iteration (maxiter=-1 or maxiter=len(pvals)) + iter : bool + ``iter`` is deprecated use ``maxiter`` instead. + If iter is True, then only one iteration step is used, this is the + two-step method. + If iter is False, then iterations are stopped at convergence which + occurs in a finite number of steps (at most len(pvals) steps). 
Returns ------- @@ -385,7 +414,7 @@ def fdrcorrection_twostage(pvals, alpha=0.05, method='bky', iter=False, pvalue-corrected : ndarray pvalues adjusted for multiple hypotheses testing to limit FDR m0 : int - ntest - rej, estimated number of true hypotheses + ntest - rej, estimated number of true (not rejected) hypotheses alpha_stages : list of floats A list of alphas that have been used at each stage @@ -411,6 +440,18 @@ def fdrcorrection_twostage(pvals, alpha=0.05, method='bky', iter=False, ''' pvals = np.asarray(pvals) + if iter is not None: + import warnings + msg = "iter keyword is deprecated, use maxiter keyword instead." + warnings.warn(msg, FutureWarning) + + if iter is False: + maxiter = 1 + elif iter is True or maxiter in [-1, None] : + maxiter = len(pvals) + # otherwise we use maxiter + + if not is_sorted: pvals_sortind = np.argsort(pvals) pvals = np.take(pvals, pvals_sortind) @@ -430,30 +471,36 @@ def fdrcorrection_twostage(pvals, alpha=0.05, method='bky', iter=False, is_sorted=True) r1 = rej.sum() if (r1 == 0) or (r1 == ntests): - return rej, pvalscorr * fact, ntests - r1, alpha_stages - ri_old = r1 - - while True: - ntests0 = 1.0 * ntests - ri_old - alpha_star = alpha_prime * ntests / ntests0 - alpha_stages.append(alpha_star) - #print ntests0, alpha_star - rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_star, method='indep', - is_sorted=True) - ri = rej.sum() - if (not iter) or ri == ri_old: - break - elif ri < ri_old: - # prevent cycles and endless loops - raise RuntimeError(" oops - should not be here") - ri_old = ri - - # make adjustment to pvalscorr to reflect estimated number of Non-Null cases - # decision is then pvalscorr < alpha (or <=) - pvalscorr *= ntests0 * 1.0 / ntests - if method == 'bky': - pvalscorr *= (1. 
+ alpha) - + # return rej, pvalscorr * fact, ntests - r1, alpha_stages + reject = rej + pvalscorr *= fact + ri = r1 + else: + ri_old = ri = r1 + ntests0 = ntests # needed if maxiter=0 + # while True: + for it in range(maxiter): + ntests0 = 1.0 * ntests - ri_old + alpha_star = alpha_prime * ntests / ntests0 + alpha_stages.append(alpha_star) + #print ntests0, alpha_star + rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_star, method='indep', + is_sorted=True) + ri = rej.sum() + if (it >= maxiter - 1) or ri == ri_old: + break + elif ri < ri_old: + # prevent cycles and endless loops + raise RuntimeError(" oops - should not be here") + ri_old = ri + + # make adjustment to pvalscorr to reflect estimated number of Non-Null cases + # decision is then pvalscorr < alpha (or <=) + pvalscorr *= ntests0 * 1.0 / ntests + if method == 'bky': + pvalscorr *= (1. + alpha) + + pvalscorr[pvalscorr>1] = 1 if not is_sorted: pvalscorr_ = np.empty_like(pvalscorr) pvalscorr_[pvals_sortind] = pvalscorr
diff --git a/statsmodels/stats/tests/test_multi.py b/statsmodels/stats/tests/test_multi.py --- a/statsmodels/stats/tests/test_multi.py +++ b/statsmodels/stats/tests/test_multi.py @@ -326,6 +326,75 @@ def test_fdr_bky(): assert_almost_equal([0.047619, 0.0649], res_tst[-1][:2], 3) assert_equal(8, res_tst[0].sum()) + # reference number from Prism, see #8619 + res2 = np.array([ + 0.0012, 0.0023, 0.0073, 0.0274, 0.0464, 0.0492, 0.0492, 0.0497, + 0.0589, 0.3742, 0.4475, 0.5505, 0.5800, 0.6262, 0.77 + ]) + assert_allclose(res_tst[1], res2, atol=6e-5) + + # issue #8619, problems if no or all rejected, ordering + pvals = np.array([0.2, 0.8, 0.3, 0.5, 1]) + res1 = fdrcorrection_twostage(pvals, alpha=0.05, method='bky') + res2 = multipletests(pvals, alpha=0.05, method='fdr_tsbky') + assert_equal(res1[0], res2[0]) + assert_allclose(res1[1], res2[1], atol=6e-5) + # confirmed with Prism + res_pv = np.array([0.7875, 1., 0.7875, 0.875 , 1.]) + assert_allclose(res1[1], res_pv, atol=6e-5) + + +def test_fdr_twostage(): + # test for iteration in fdrcorrection_twostage, new maxiter + # example from BKY + pvals = [ + 0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, 0.0459, + 0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000] + n = len(pvals) + + # bh twostage fdr + k = 0 + # same pvalues as one-stage fdr + res0 = multipletests(pvals, alpha=0.05, method='fdr_bh') + res1 = fdrcorrection_twostage(pvals, alpha=0.05, method='bh', maxiter=k, + iter=None) + res2 = multipletests(pvals, alpha=0.05, method='fdr_tsbh', maxiter=k) + assert_allclose(res1[1], res0[1]) + assert_allclose(res2[1], res1[1]) + + k = 1 + # pvalues corrected by first stage number of rejections + res0 = multipletests(pvals, alpha=0.05, method='fdr_bh') + res1 = fdrcorrection_twostage(pvals, alpha=0.05, method='bh', maxiter=k, + iter=None) + res2 = multipletests(pvals, alpha=0.05, method='fdr_tsbh', maxiter=k) + res3 = multipletests(pvals, alpha=0.05, method='fdr_tsbh') + assert_allclose(res1[1], res0[1] * (1 - 
res0[0].sum() / n)) + assert_allclose(res2[1], res1[1]) + assert_allclose(res3[1], res1[1]) # check default maxiter + + # bky has an extra factor 1+alpha in fdr twostage independent of iter + fact = 1 + 0.05 + k = 0 + # same pvalues as one-stage fdr + res0 = multipletests(pvals, alpha=0.05, method='fdr_bh') + res1 = fdrcorrection_twostage(pvals, alpha=0.05, method='bky', maxiter=k, + iter=None) + res2 = multipletests(pvals, alpha=0.05, method='fdr_tsbky', maxiter=k) + assert_allclose(res1[1], np.clip(res0[1] * fact, 0, 1)) + assert_allclose(res2[1], res1[1]) + + k = 1 + # pvalues corrected by first stage number of rejections + res0 = multipletests(pvals, alpha=0.05, method='fdr_bh') + res1 = fdrcorrection_twostage(pvals, alpha=0.05, method='bky', maxiter=k, + iter=None) + res2 = multipletests(pvals, alpha=0.05, method='fdr_tsbky', maxiter=k) + res3 = multipletests(pvals, alpha=0.05, method='fdr_tsbky') + assert_allclose(res1[1], res0[1] * (1 - res0[0].sum() / n) * fact) + assert_allclose(res2[1], res1[1]) + assert_allclose(res3[1], res1[1]) # check default maxiter + @pytest.mark.parametrize('method', sorted(multitest_methods_names)) def test_issorted(method):
BUG: statsmodels.stats.multitest.fdrcorrection_twostage sometimes returns values in wrong order and gives values above 1 test case: ``` >>> import statsmodels.stats.multitest >>> import numpy as np >>> test=np.array([0.2, 0.8, 0.3, 0.5, 1]) >>> statsmodels.stats.multitest.fdrcorrection_twostage(test, alpha=0.05, method='bky')[1] array([0.7875, 0.7875, 0.875 , 1.05 , 1.05 ]) >>> statsmodels.stats.multitest.multipletests(test, alpha=0.05, method='fdr_tsbky')[1] array([0.7875, 1. , 0.7875, 0.875 , 1. ]) ``` There are two issues with `fdrcorrection_twostage` 1. it sometimes but not always returns results in sorted order, which does not mach input order. The behavior is inconsistent and depends on values in the input array 2. it returns values above 1 results from `fdrcorrection_twostage` and `multipletests` should match but only results from `multipletests` seems to be correct compared to results from Prism
Thanks for reporting. Looks like a bug, but it's not clear to me yet why. It has the same rearranging code like fdrcorrection and multipletests. if there is one rejection, then it seems to work ``` test=np.array([0.2, 0.8, 0.3, 0.5, 0.9, 1e-3]) res1 = statsmodels.stats.multitest.fdrcorrection_twostage(test, alpha=0.05, method='bky')[1] print(test) res2 = statsmodels.stats.multitest.multipletests(test, alpha=0.05, method='fdr_tsbky')[1] res1, res2 [0.2 0.8 0.3 0.5 0.9 0.001] (array([0.525 , 0.7875 , 0.525 , 0.65625, 0.7875 , 0.00525]), array([0.525 , 0.7875 , 0.525 , 0.65625, 0.7875 , 0.00525])) ``` found it: if no rejection, then there is an early return that doesn't reorder the result ``` r1 = rej.sum() if (r1 == 0) or (r1 == ntests): return rej, pvalscorr * fact, ntests - r1, alpha_stages ``` Also it looks like `multipletests` does not support `iter` keyword for fdr twostage. aside: both fdrcorrection method always reorder if not is_sorted, no option for turning it off as in multipletest function. @nevubm Can you check some additional results with Prism, if you have that? a current unit test uses the following uncorrected pvalues pvals = [ 0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, 0.0459, 0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000] When I wrote this function, there were no corrected p-values for the two-stage fdr available in other packages or reference articles. The original paper only had the rejection decision and I derived the computation for corrected p-values myself. fdr_tsbh has unit tests for corrected pvalues in TestMultiTests3 and TestMultiTests4 fdr_tsbky only has unit test for rejection count and for alpha-star (in test_fdr_bky ) possible fixes - make large if else block, i.e. iterate in else block, or - keep early return and write helper function for reordering of return arrays the first is cleaner if we want to change the return, e.g. to HolderTuple, but maybe we only change the return of `multipletests`. 
other possible changes - `iter` is reserved word, rename to `iterate` - add `fdrts_iterate` (or similar name) keyword to `multipletests` and forward to fdr two-stage - privatize the two fdr functions, I guess not > Can you check some additional results with Prism, if you have that? > > a current unit test uses the following uncorrected pvalues pvals = [ 0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, 0.0459, 0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000] This is the output from Prism 9.4.1, using two stage BKY P value | q value -- | -- 0.0001 | 0.0012 0.0004 | 0.0023 0.0019 | 0.0073 0.0095 | 0.0274 0.0201 | 0.0464 0.0278 | 0.0492 0.0298 | 0.0492 0.0344 | 0.0497 0.0459 | 0.0589 0.3240 | 0.3742 0.4262 | 0.4475 0.5719 | 0.5505 0.6528 | 0.5800 0.7590 | 0.6262 1.0000 | 0.7700 For the unit tests, it would be a good idea to shuffle the input order a couple times instead of feeding it in sorted order, to check the output order. Also, are the values above 1 related to the sorting or is that a different issue? Since you're already thinking about this, I have a question. Having separate `fdrcorrection_twostage`, `fdrcorrection`, and `multipletests` that do a lot of the same thing was always a bit confusing for me. Is not the main difference that `fdrcorrection_twostage` can do more than 2 stages with `iter`? And `fdrcorrection` seems fully redundant with `multipletests`. Would it be possible to combine these functions into a single function in a way that makes sense? 
Thanks for the reference numbers I have not yet checked why pvalue can be >1 The `fdrcorrection` function uses `pvals_corrected[pvals_corrected>1] = 1` but I don't see yet why pvalues can still be >1 ``` test=np.array([0.2, 0.8, 0.3, 0.5, 0.9999999]) res1 = statsmodels.stats.multitest.fdrcorrection_twostage(test, alpha=0.05, method='bky')[1] print(test) res2 = statsmodels.stats.multitest.multipletests(test, alpha=0.05, method='fdr_tsbky')[1] res1, res2 [0.2 0.8 0.3 0.5 0.9999999] (array([0.7875 , 0.7875 , 0.875 , 1.0499999, 1.0499999]), array([0.7875, 1. , 0.7875, 0.875 , 1. ])) ``` **update** the call to fdrcorrection if the two-stage function return <=1, however two-stage still has a multiplicative `fact` > 1 in return ``` if (r1 == 0) or (r1 == ntests): return rej, pvalscorr * fact, ntests - r1, alpha_stages ``` There is also factor in the final return if only some tests are rejected, so there should be cases with pvalue > 1 also in that path ``` if method == 'bky': pvalscorr *= (1. + alpha) ``` `multipletests` function has `pvals_corrected[pvals_corrected>1] = 1` for all methods and will never return pvalue > 1, it's only the two-stage fdr function than can have it. > Having separate fdrcorrection_twostage, fdrcorrection, and multipletests that do a lot of the same thing `multipletests` delegates to the two fdrcorrection functions. I split those out because those computation are much longer than the non-fdr methods. Essentially, `multipletests` is supposed to be the main public function, the fdrcorrection functions are implementation details. But because those are not private (no leading underscore), we ended up adding the supporting code like sorting, reordering and pvalue>1 checks to all three functions (and need to finish that for the two-stage fdr). The multiple testing functions are pretty old. Because this bug hasn't been reported yet, I guess that most users use `multipletests` which does not have the bugs. 
After we add the `iter` option for two-stage fdr to multipletests, then there is no reason anymore to use the fdrcorrection functions directly. Aside: if `is_sorted=True`, then most of the extra code like sorting and reordering is not used. So internally we don't duplicate those computations when multipletests calls an fdrcorrection function. **update** We should add a comment to the docstring of the fdrcorrection functions to indicate that the main function is `multipletests` and check we have the `See Also`. Similar problem for rates etest https://www.statsmodels.org/dev/generated/statsmodels.stats.rates.etest_poisson_2indep.html In that case, the main function `test_poisson_2indep` uses `etest_kwds=None` so users have no reason to call the etest function directly. related: should we have `maxiter` (int or boolean) instead of `iter` (boolean)? Current `iter` is binary, either no iteration or iterate to convergence. Loop has at most ntests iterations, AFAICS, possible ri=0,...,ntests. It would be possible to allow just one or two iterations using maxiter. e.g. If maxiter is in {True, False}, then current no are complete iteration If maxiter is int or not boolean, then stop at maxiter unless already converged before maxiter (as in optimization loops) `maxiter` seems more complete and flexible, and you can have `maxiter=-1` for no limit/until convergence, `0` for no iteration (default), and any other integer specifies number of iterations another problem that currently the twostage fdrcorrection function is not redundant: multipletest does not return 2 extra returns from fdrcorrection: m0 estimated number of true hypotheses and alphastages. m0 can be inferred from other returns, but not alphastages need holdertuple to allow for extra returns for more iteration, then twostage, the number of iterations can be inferred from len(alphastages). 
Given that we only have a finite number of possible iteration, a `converged` indicator is not needed because we always converge (endless loops are ruled out by code, number of rejections can only increase, raises exception if it decreases) If we switch to holdertuple also in fdrcorrection_twostage, then we could also return `ri` list, i.e. stages of number of rejections in iterations.
"2023-01-19T17:57:21Z"
0.13
[ "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[SidakSS-val6]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[BH-val0]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[SidakSD-val5]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[Holm-val4]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[TSBH_0.05-val7]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[BY-val1]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[rawp-val8]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[Bonferroni-val2]", "statsmodels/stats/tests/test_multi.py::TestMultiTests3::test_multi_pvalcorrection_rmethods[Hochberg-val3]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[SidakSS-val6]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[BY-val1]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[BH-val0]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[Bonferroni-val2]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[TSBH_0.05-val7]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[SidakSD-val5]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[Holm-val4]", "statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[Hochberg-val3]", 
"statsmodels/stats/tests/test_multi.py::TestMultiTests4::test_multi_pvalcorrection_rmethods[rawp-val8]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[rawp-val8]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[TSBH_0.05-val7]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[Hochberg-val3]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[SidakSS-val6]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[Bonferroni-val2]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[BH-val0]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[Holm-val4]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[SidakSD-val5]", "statsmodels/stats/tests/test_multi.py::TestMultiTests2::test_multi_pvalcorrection_rmethods[BY-val1]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[Bonferroni-val2]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[Holm-val4]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[rawp-val8]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[Hochberg-val3]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[SidakSD-val5]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[BY-val1]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[BH-val0]", 
"statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[TSBH_0.05-val7]", "statsmodels/stats/tests/test_multi.py::TestMultiTests1::test_multi_pvalcorrection_rmethods[SidakSS-val6]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[sh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbh-0.1]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_local_fdr", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hs-0.1]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[True-True-False]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[b]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_issorted[s]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_bh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[ho]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_gbs]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbky-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_issorted[b]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_by]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[False-True-False]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_tsbh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_issorted[h]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_gbs-0.01]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_gbs]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_i-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbh-0.01]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[s]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[hs]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_i-0.01]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[True-False-False]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_tukeyhsd", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[False-False-False]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[False-False-True]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[h]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[True-True-True]", "statsmodels/stats/tests/test_multi.py::test_hommel", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_tsbh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbh-0.01]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_null_distribution", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-sh-0.01]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[False-True-True]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_issorted[hs]", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_tsbky]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hs-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_n-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbky-0.05]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-s-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_issorted[ho]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_bh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_tsbky-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-sh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-hs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_i-0.01]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_issorted[sh]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_gbs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hommel-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_i-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-h-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-sh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-fdr_n-0.01]", "statsmodels/stats/tests/test_multi.py::test_floating_precision[fdr_tsbky]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_i-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[7-b-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hommel-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-fdr_tsbh-0.1]", "statsmodels/stats/tests/test_multi.py::test_issorted[fdr_by]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_tsbky-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_null_constrained[True-False-True]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-b-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[5-fdr_i-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[4-b-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[1-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[10-hs-0.1]", 
"statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_tsbh-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-h-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[2-hommel-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-sh-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_gbs-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-s-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[8-fdr_n-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-s-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[6-fdr_gbs-0.01]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-hs-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[3-fdr_tsbky-0.1]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[9-h-0.05]", "statsmodels/stats/tests/test_multi.py::test_pvalcorrection_reject[0-fdr_gbs-0.01]" ]
[ "statsmodels/stats/tests/test_multi.py::test_fdr_bky", "statsmodels/stats/tests/test_multi.py::test_fdr_twostage" ]
Python
[]
[]
statsmodels/statsmodels
8,648
statsmodels__statsmodels-8648
[ "8641" ]
6d5f7461e7e25456fd38f032d3028945f5a54a82
diff --git a/statsmodels/discrete/count_model.py b/statsmodels/discrete/count_model.py --- a/statsmodels/discrete/count_model.py +++ b/statsmodels/discrete/count_model.py @@ -369,7 +369,7 @@ def hessian(self, params): def predict(self, params, exog=None, exog_infl=None, exposure=None, offset=None, which='mean', y_values=None): """ - Predict response variable or other statistic given exogenous variables. + Predict expected response or other statistic given exogenous variables. Parameters ---------- @@ -396,8 +396,8 @@ def predict(self, params, exog=None, exog_infl=None, exposure=None, which : str (optional) Statitistic to predict. Default is 'mean'. - - 'mean' : the conditional expectation of endog E(y | x), - i.e. exp of linear predictor. + - 'mean' : the conditional expectation of endog E(y | x). This + takes inflated zeros into account. - 'linear' : the linear predictor of the mean function. - 'var' : returns the estimated variance of endog implied by the model. diff --git a/statsmodels/distributions/copula/transforms.py b/statsmodels/distributions/copula/transforms.py --- a/statsmodels/distributions/copula/transforms.py +++ b/statsmodels/distributions/copula/transforms.py @@ -14,7 +14,6 @@ from scipy.special import expm1, gamma -# not used yet class Transforms: def __init__(self): diff --git a/statsmodels/genmod/families/family.py b/statsmodels/genmod/families/family.py --- a/statsmodels/genmod/families/family.py +++ b/statsmodels/genmod/families/family.py @@ -34,6 +34,10 @@ class Family: variance : a variance function Measures the variance as a function of the mean probabilities. See the individual families for the default variance function. + check_link : bool + If True (default), then and exception is raised if the link is invalid + for the family. + If False, then the link is not checked. See Also -------- @@ -383,6 +387,10 @@ class Poisson(Family): The default link for the Poisson family is the log link. Available links are log, identity, and sqrt. 
See statsmodels.families.links for more information. + check_link : bool + If True (default), then and exception is raised if the link is invalid + for the family. + If False, then the link is not checked. Attributes ---------- @@ -535,6 +543,10 @@ class Gaussian(Family): The default link for the Gaussian family is the identity link. Available links are log, identity, and inverse. See statsmodels.genmod.families.links for more information. + check_link : bool + If True (default), then and exception is raised if the link is invalid + for the family. + If False, then the link is not checked. Attributes ---------- @@ -705,6 +717,10 @@ class Gamma(Family): The default link for the Gamma family is the inverse link. Available links are log, identity, and inverse. See statsmodels.genmod.families.links for more information. + check_link : bool + If True (default), then and exception is raised if the link is invalid + for the family. + If False, then the link is not checked. Attributes ---------- @@ -867,6 +883,10 @@ class Binomial(Family): The default link for the Binomial family is the logit link. Available links are logit, probit, cauchy, log, loglog, and cloglog. See statsmodels.genmod.families.links for more information. + check_link : bool + If True (default), then and exception is raised if the link is invalid + for the family. + If False, then the link is not checked. Attributes ---------- @@ -1136,6 +1156,10 @@ class InverseGaussian(Family): inverse squared link. Available links are InverseSquared, Inverse, Log, and Identity. See statsmodels.genmod.families.links for more information. + check_link : bool + If True (default), then and exception is raised if the link is invalid + for the family. + If False, then the link is not checked. Attributes ---------- @@ -1299,6 +1323,10 @@ class NegativeBinomial(Family): The ancillary parameter for the negative binomial distribution. For now ``alpha`` is assumed to be nonstochastic. The default value is 1. 
Permissible values are usually assumed to be between .01 and 2. + check_link : bool + If True (default), then and exception is raised if the link is invalid + for the family. + If False, then the link is not checked. Attributes ---------- @@ -1518,6 +1546,10 @@ class Tweedie(Family): likelihood is used. In both cases, for likelihood computations the var_power must be between 1 and 2. + check_link : bool + If True (default), then and exception is raised if the link is invalid + for the family. + If False, then the link is not checked. Attributes ---------- diff --git a/statsmodels/genmod/generalized_linear_model.py b/statsmodels/genmod/generalized_linear_model.py --- a/statsmodels/genmod/generalized_linear_model.py +++ b/statsmodels/genmod/generalized_linear_model.py @@ -218,6 +218,10 @@ class GLM(base.LikelihoodModel): Notes ----- + Note: PerfectSeparationError exception has been converted to a + PerfectSeparationWarning and perfect separation or perfect prediction will + not raise an exception by default. (changed in version 0.14) + Only the following combinations make sense for family and link: ============= ===== === ===== ====== ======= === ==== ====== ====== ==== diff --git a/statsmodels/robust/norms.py b/statsmodels/robust/norms.py --- a/statsmodels/robust/norms.py +++ b/statsmodels/robust/norms.py @@ -831,10 +831,12 @@ class MQuantileNorm(RobustNorm): in QuantileRegression but replaces the L1 absolute value by a chosen base norm. - rho_q(u) = |q - I(q < 0)| * rho_base(u) - or - rho_q(u) = q * rho_base(u) if u >= 0 - rho_q(u) = (1 - q) * rho_base(u) if u < 0 + rho_q(u) = |q - I(q < 0)| * rho_base(u) + + or, equivalently, + + rho_q(u) = q * rho_base(u) if u >= 0 + rho_q(u) = (1 - q) * rho_base(u) if u < 0 Parameters @@ -854,9 +856,22 @@ class MQuantileNorm(RobustNorm): References ---------- - Newey Powell - Jones - Bianchi Salvati + + .. [*] Bianchi, Annamaria, and Nicola Salvati. 2015. 
β€œAsymptotic Properties + and Variance Estimators of the M-Quantile Regression Coefficients + Estimators.” Communications in Statistics - Theory and Methods 44 (11): + 2416–29. doi:10.1080/03610926.2013.791375. + + .. [*] Breckling, Jens, and Ray Chambers. 1988. β€œM-Quantiles.” + Biometrika 75 (4): 761–71. doi:10.2307/2336317. + + .. [*] Jones, M. C. 1994. β€œExpectiles and M-Quantiles Are Quantiles.” + Statistics & Probability Letters 20 (2): 149–53. + doi:10.1016/0167-7152(94)90031-0. + + .. [*] Newey, Whitney K., and James L. Powell. 1987. β€œAsymmetric Least + Squares Estimation and Testing.” Econometrica 55 (4): 819–47. + doi:10.2307/1911031. ... """ diff --git a/statsmodels/stats/power.py b/statsmodels/stats/power.py --- a/statsmodels/stats/power.py +++ b/statsmodels/stats/power.py @@ -30,6 +30,8 @@ """ +import warnings + import numpy as np from scipy import stats, optimize from statsmodels.tools.rootfinding import brentq_expanding @@ -232,7 +234,7 @@ def ftest_anova_power(effect_size, nobs, alpha, k_groups=2, df=None): return pow_#, crit -def ftest_power(effect_size, df_num, df_denom, alpha, ncc=1): +def ftest_power(effect_size, df2, df1, alpha, ncc=1): '''Calculate the power of a F-test. Parameters @@ -240,10 +242,12 @@ def ftest_power(effect_size, df_num, df_denom, alpha, ncc=1): effect_size : float standardized effect size, mean divided by the standard deviation. effect size has to be positive. - df_num : int or float - numerator degrees of freedom. - df_denom : int or float - denominator degrees of freedom. + df2 : int or float + Denominator degrees of freedom. + This corresponds to the df_resid in Wald tests. + df1 : int or float + Numerator degrees of freedom. + This corresponds to the number of constraints in Wald tests. alpha : float in interval (0,1) significance level, e.g. 0.05, is the probability of a type I error, that is wrong rejections if the Null Hypothesis is true. 
@@ -260,18 +264,25 @@ def ftest_power(effect_size, df_num, df_denom, alpha, ncc=1): Notes ----- + changed in 0.14: use df2, df1 instead of df_num, df_denom as arg names. + The latter had reversed meaning. + + The sample size is given implicitly by ``df2`` with fixed number of + constraints given by numerator degrees of freedom ``df1``: - sample size is given implicitly by df_num + nobs = df2 + df1 + ncc - set ncc=0 to match t-test, or f-test in LikelihoodModelResults. + Set ncc=0 to match t-test, or f-test in LikelihoodModelResults. ncc=1 matches the non-centrality parameter in R::pwr::pwr.f2.test ftest_power with ncc=0 should also be correct for f_test in regression - models, with df_num and d_denom as defined there. (not verified yet) + models, with df_num (df1) as number of constraints and d_denom (df2) as + df_resid. ''' + df_num, df_denom = df1, df2 nc = effect_size**2 * (df_denom + df_num + ncc) - crit = stats.f.isf(alpha, df_denom, df_num) - pow_ = stats.ncf.sf(crit, df_denom, df_num, nc) + crit = stats.f.isf(alpha, df_num, df_denom) + pow_ = stats.ncf.sf(crit, df_num, df_denom, nc) return pow_ #, crit, nc @@ -581,8 +592,8 @@ def solve_power(self, effect_size=None, nobs=None, alpha=None, power=None, Parameters ---------- effect_size : float - standardized effect size, mean divided by the standard deviation. - effect size has to be positive. + Standardized effect size.The effect size is here Cohen's f, square + root of "f2". nobs : int or float sample size, number of observations. alpha : float in interval (0,1) @@ -868,22 +879,63 @@ def solve_power(self, effect_size=None, nobs1=None, alpha=None, power=None, class FTestPower(Power): - '''Statistical Power calculations for generic F-test + """Statistical Power calculations for generic F-test of a constraint - ''' + This is based on Cohen's f as effect size measure. + + Warning: Methods in this class have the names df_num and df_denom reversed. 
+ + Examples + -------- + Sample size and power for multiple regression base on R-squared + + Compute effect size from R-squared + + >>> r2 = 0.1 + >>> f2 = r2 / (1 - r2) + >>> f = np.sqrt(f2) + >>> r2, f2, f + (0.1, 0.11111111111111112, 0.33333333333333337) + + Find sample size by solving for denominator df, wrongly named ``df_num`` + + >>> df1 = 1 # number of constraints in hypothesis test + >>> df2 = FTestPower().solve_power(effect_size=f, alpha=0.1, power=0.9, + df_denom=df1) + >>> ncc = 1 # default + >>> nobs = df2 + df1 + ncc + >>> df2, nobs + (76.46459758305376, 78.46459758305376) + + verify power at df2 + + >>> FTestPower().power(effect_size=f, alpha=0.1, df_denom=df1, df_num=df2) + 0.8999999972109698 + + """ def power(self, effect_size, df_num, df_denom, alpha, ncc=1): '''Calculate the power of a F-test. + The effect size is Cohen's ``f``, square root of ``f2``. + + The sample size is given by ``nobs = df_denom + df_num + ncc`` + + Warning: The meaning of df_num and df_denom is reversed. + Parameters ---------- effect_size : float - standardized effect size, mean divided by the standard deviation. - effect size has to be positive. + Standardized effect size. The effect size is here Cohen's ``f``, + square root of ``f2``. df_num : int or float - numerator degrees of freedom. + Warning incorrect name + denominator degrees of freedom, + This corresponds to the number of constraints in Wald tests. df_denom : int or float - denominator degrees of freedom. + Warning incorrect name + numerator degrees of freedom. + This corresponds to the df_resid in Wald tests. alpha : float in interval (0,1) significance level, e.g. 0.05, is the probability of a type I error, that is wrong rejections if the Null Hypothesis is true. 
@@ -916,7 +968,7 @@ def power(self, effect_size, df_num, df_denom, alpha, ncc=1): #method is only added to have explicit keywords and docstring def solve_power(self, effect_size=None, df_num=None, df_denom=None, - nobs=None, alpha=None, power=None, ncc=1): + alpha=None, power=None, ncc=1, **kwargs): '''solve for any one parameter of the power of a F-test for the one sample F-test the keywords are: @@ -924,14 +976,26 @@ def solve_power(self, effect_size=None, df_num=None, df_denom=None, Exactly one needs to be ``None``, all others need numeric values. + The effect size is Cohen's ``f``, square root of ``f2``. + + The sample size is given by ``nobs = df_denom + df_num + ncc``. + + Warning: The meaning of df_num and df_denom is reversed. Parameters ---------- effect_size : float - standardized effect size, mean divided by the standard deviation. - effect size has to be positive. - nobs : int or float - sample size, number of observations. + Standardized effect size. The effect size is here Cohen's ``f``, + square root of ``f2``. + df_num : int or float + Warning incorrect name + denominator degrees of freedom, + This corresponds to the number of constraints in Wald tests. + Sample size is given by ``nobs = df_denom + df_num + ncc`` + df_denom : int or float + Warning incorrect name + numerator degrees of freedom. + This corresponds to the df_resid in Wald tests. alpha : float in interval (0,1) significance level, e.g. 0.05, is the probability of a type I error, that is wrong rejections if the Null Hypothesis is true. @@ -939,10 +1003,13 @@ def solve_power(self, effect_size=None, df_num=None, df_denom=None, power of the test, e.g. 0.8, is one minus the probability of a type II error. Power is the probability that the test correctly rejects the Null Hypothesis if the Alternative Hypothesis is true. - alternative : str, 'two-sided' (default) or 'one-sided' - extra argument to choose whether the power is calculated for a - two-sided (default) or one sided test. 
- 'one-sided' assumes we are in the relevant tail. + ncc : int + degrees of freedom correction for non-centrality parameter. + see Notes + kwargs : empty + ``kwargs`` are not used and included for backwards compatibility. + If ``nobs`` is used as keyword, then a warning is issued. All + other keywords in ``kwargs`` raise a ValueError. Returns ------- @@ -961,6 +1028,11 @@ def solve_power(self, effect_size=None, df_num=None, df_denom=None, where this fails. ''' + if kwargs: + if "nobs" in kwargs: + warnings.warn("nobs is not used") + else: + raise ValueError(f"incorrect keyword(s) {kwargs}") return super(FTestPower, self).solve_power(effect_size=effect_size, df_num=df_num, df_denom=df_denom, @@ -971,6 +1043,12 @@ def solve_power(self, effect_size=None, df_num=None, df_denom=None, class FTestAnovaPower(Power): '''Statistical Power calculations F-test for one factor balanced ANOVA + This is based on Cohen's f as effect size measure. + + See Also + -------- + statsmodels.stats.oneway.effectsize_oneway + ''' def power(self, effect_size, nobs, alpha, k_groups=2): @@ -979,8 +1057,8 @@ def power(self, effect_size, nobs, alpha, k_groups=2): Parameters ---------- effect_size : float - standardized effect size, mean divided by the standard deviation. - effect size has to be positive. + standardized effect size. The effect size is here Cohen's f, square + root of "f2". nobs : int or float sample size, number of observations. alpha : float in interval (0,1) diff --git a/statsmodels/tsa/stattools.py b/statsmodels/tsa/stattools.py --- a/statsmodels/tsa/stattools.py +++ b/statsmodels/tsa/stattools.py @@ -1472,9 +1472,11 @@ def grangercausalitytests(x, maxlag, addconst=True, verbose=None): >>> data = data.data[["realgdp", "realcons"]].pct_change().dropna() All lags up to 4 + >>> gc_res = grangercausalitytests(data, 4) Only lag 4 + >>> gc_res = grangercausalitytests(data, [4]) """ x = array_like(x, "x", ndim=2)
diff --git a/statsmodels/stats/tests/test_power.py b/statsmodels/stats/tests/test_power.py --- a/statsmodels/stats/tests/test_power.py +++ b/statsmodels/stats/tests/test_power.py @@ -688,6 +688,19 @@ def setup_class(cls): # precision for test_power cls.decimal = 5 + def test_kwargs(self): + + with pytest.warns(UserWarning): + smp.FTestPower().solve_power( + effect_size=0.3, alpha=0.1, power=0.9, df_denom=2, + nobs=None) + + with pytest.raises(ValueError): + smp.FTestPower().solve_power( + effect_size=0.3, alpha=0.1, power=0.9, df_denom=2, + junk=3) + + def test_power_solver(): # messing up the solver to trigger backup
Incorrect comment character in R code #### Describe the bug The R script [CH.r](https://github.com/statsmodels/statsmodels/blob/main/statsmodels/stats/libqsturng/CH.r) appears to use `%` as a comment character rather than `#`. This invalid R code is problematic when also using [`renv`](https://rstudio.github.io/renv/index.html) (an R virtual environment tool, similar to `venv` for Python) for dependency detection. Changing the comment character to `#` should fix the issue – happy to open a simple PR addressing this if it makes things easier. #### Code Sample Steps to reproduce (assuming [R is installed](https://www.r-project.org/)) ```bash # Create venv and install statsmodels mkdir renv-issue && cd renv-issue python3 -m venv venv/ source venv/bin/activate pip install statsmodels # Install renv R package R -e "install.packages('remotes', repos='http://cran.us.r-project.org')" R -e "remotes::install_github('rstudio/[email protected]')" # initialize renv library (and discover dependencies) R -e "renv::init()" ``` That last line gives the following error, with unexpected input at the `%` character: ``` WARNING: One or more problems were discovered while enumerating dependencies. /Users/mike.garcia/renv-issue/venv/lib/python3.9/site-packages/statsmodels/stats/libqsturng/CH.r ------------------------------------------------------------------------------------------------ ERROR 1: /Users/mike.garcia/renv-issue/venv/lib/python3.9/site-packages/statsmodels/stats/libqsturng/CH.r:1:1: unexpected input 1: % Copyright (c) 2011, Roger Lew BSD [see LICENSE.txt] ^ ``` <details> This is not directly an issue for using `statsmodels` (as far as I can tell), but is creating some issues when using `renv` in a multi-lingual project that also contains a Python virtual environment containing `statsmodels`. 
The `renv` package automatically discovers R dependencies in the project directory: when it encounters the invalid R code in `CH.r` it throws an `unexpected input` warning requiring user y/n prompt to proceed. I realize that `renv` provides a option to use a [`.renvignore` file](https://rstudio.github.io/renv/reference/dependencies.html#ignoring-files) analogous to `.gitignore` to which the virtual environment could (should?) be added. But thought I'd raise the flag on the R syntax here anyway. - βœ… I did not find any issue referencing `CH.r` in current issues. - βœ… Issue is present in current version of `main` branch. </details> #### Expected Output `R -e renv::init()` in the above reprex should not display the error in #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> ``` INSTALLED VERSIONS ------------------ Python: 3.9.5.final.0 OS: Darwin 21.6.0 Darwin Kernel Version 21.6.0: Sat Jun 18 17:07:25 PDT 2022; root:xnu-8020.140.41~1/RELEASE_X86_64 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.13.5 (/Users/mike.garcia/renv-issue/venv/lib/python3.9/site-packages/statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.24.1 (/Users/mike.garcia/renv-issue/venv/lib/python3.9/site-packages/numpy) scipy: 1.10.0 (/Users/mike.garcia/renv-issue/venv/lib/python3.9/site-packages/scipy) pandas: 1.5.3 (/Users/mike.garcia/renv-issue/venv/lib/python3.9/site-packages/pandas) dateutil: 2.8.2 (/Users/mike.garcia/renv-issue/venv/lib/python3.9/site-packages/dateutil) patsy: 0.5.3 (/Users/mike.garcia/renv-issue/venv/lib/python3.9/site-packages/patsy) Optional Dependencies ===================== matplotlib: Not installed cvxopt: Not installed joblib: Not installed Developer Tools ================ IPython: Not installed jinja2: Not installed sphinx: Not installed pygments: Not installed pytest: Not installed virtualenv: Not installed ``` </details>
"2023-02-03T18:15:27Z"
0.13
[ "statsmodels/stats/tests/test_power.py::TestTTPowerOneS6::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS6::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS6::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS6::test_roots", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS3::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS3::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS3::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS3::test_roots", "statsmodels/stats/tests/test_power.py::TestFtestAnovaPower::test_positional", "statsmodels/stats/tests/test_power.py::TestFtestAnovaPower::test_power", "statsmodels/stats/tests/test_power.py::TestFtestAnovaPower::test_roots", "statsmodels/stats/tests/test_power.py::TestFtestAnovaPower::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS4::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS4::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS4::test_roots", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS4::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS2::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS2::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS2::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS2::test_roots", "statsmodels/stats/tests/test_power.py::TestNormalIndPower_onesamp2::test_roots", "statsmodels/stats/tests/test_power.py::TestNormalIndPower_onesamp2::test_power_plot", "statsmodels/stats/tests/test_power.py::TestNormalIndPower_onesamp2::test_power", "statsmodels/stats/tests/test_power.py::TestNormalIndPower_onesamp2::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS2::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS2::test_roots", 
"statsmodels/stats/tests/test_power.py::TestTTPowerTwoS2::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS2::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS5::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS5::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS5::test_roots", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS5::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS1::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS1::test_roots", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS1::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS1::test_power_plot", "statsmodels/stats/tests/test_power.py::TestFtestPower::test_positional", "statsmodels/stats/tests/test_power.py::TestFtestPower::test_roots", "statsmodels/stats/tests/test_power.py::TestFtestPower::test_power", "statsmodels/stats/tests/test_power.py::TestNormalIndPower2::test_power", "statsmodels/stats/tests/test_power.py::TestNormalIndPower2::test_power_plot", "statsmodels/stats/tests/test_power.py::TestNormalIndPower2::test_roots", "statsmodels/stats/tests/test_power.py::TestNormalIndPower2::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS5::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS5::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS5::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerOneS5::test_roots", "statsmodels/stats/tests/test_power.py::test_normal_power_explicit", "statsmodels/stats/tests/test_power.py::test_ftest_power", "statsmodels/stats/tests/test_power.py::test_normal_sample_size_one_tail", "statsmodels/stats/tests/test_power.py::test_power_solver", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS6::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS6::test_positional", 
"statsmodels/stats/tests/test_power.py::TestTTPowerTwoS6::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS6::test_roots", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS1::test_roots", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS1::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS1::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS1::test_power", "statsmodels/stats/tests/test_power.py::TestNormalIndPower_onesamp1::test_positional", "statsmodels/stats/tests/test_power.py::TestNormalIndPower_onesamp1::test_roots", "statsmodels/stats/tests/test_power.py::TestNormalIndPower_onesamp1::test_power_plot", "statsmodels/stats/tests/test_power.py::TestNormalIndPower_onesamp1::test_power", "statsmodels/stats/tests/test_power.py::TestChisquarePower::test_roots", "statsmodels/stats/tests/test_power.py::TestChisquarePower::test_positional", "statsmodels/stats/tests/test_power.py::TestChisquarePower::test_power_plot", "statsmodels/stats/tests/test_power.py::TestChisquarePower::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS4::test_roots", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS4::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS4::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS4::test_positional", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS3::test_roots", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS3::test_power_plot", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS3::test_power", "statsmodels/stats/tests/test_power.py::TestTTPowerTwoS3::test_positional", "statsmodels/stats/tests/test_power.py::TestNormalIndPower1::test_roots", "statsmodels/stats/tests/test_power.py::TestNormalIndPower1::test_power_plot", "statsmodels/stats/tests/test_power.py::TestNormalIndPower1::test_positional", "statsmodels/stats/tests/test_power.py::TestNormalIndPower1::test_power" ]
[ "statsmodels/stats/tests/test_power.py::TestFtestPower::test_kwargs" ]
Python
[]
[]
statsmodels/statsmodels
8,664
statsmodels__statsmodels-8664
[ "8663" ]
e144c6a8b8de3ee17e814bbb8962cdb890b66901
diff --git a/statsmodels/iolib/summary2.py b/statsmodels/iolib/summary2.py --- a/statsmodels/iolib/summary2.py +++ b/statsmodels/iolib/summary2.py @@ -103,7 +103,7 @@ def add_dict(self, d, ncols=2, align='l', float_format="%.4f"): def add_text(self, string): """Append a note to the bottom of the summary table. In ASCII tables, - the note will be wrapped to table width. Notes are not indendented. + the note will be wrapped to table width. Notes are not indented. """ self.extra_txt.append(string) @@ -201,7 +201,12 @@ def as_html(self): tab = [x.as_html() for x in simple_tables] tab = '\n'.join(tab) - return tab + temp_txt = [st.replace('\n', '<br/>\n')for st in self.extra_txt] + txt = '<br/>\n'.join(temp_txt) + + out = '<br/>\n'.join([tab, txt]) + + return out def as_latex(self, label=''): """Generate LaTeX Summary Table @@ -234,8 +239,12 @@ def as_latex(self, label=''): # create single tabular object for summary_col tab = re.sub(to_replace, r'\\midrule\n', tab) - out = '\\begin{table}', title, label, tab, '\\end{table}' - out = '\n'.join(out) + non_captioned = '\\begin{table}', title, label, tab, '\\end{table}' + non_captioned = '\n'.join(non_captioned) + + txt = ' \\newline \n'.join(self.extra_txt) + out = non_captioned + '\n\\bigskip\n' + txt + return out diff --git a/statsmodels/regression/linear_model.py b/statsmodels/regression/linear_model.py --- a/statsmodels/regression/linear_model.py +++ b/statsmodels/regression/linear_model.py @@ -721,7 +721,8 @@ class WLS(RegressionModel): >>> results.tvalues array([ 2.0652652 , 0.35684428]) >>> print(results.t_test([1, 0])) - <T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5> + <T test: effect=array([ 2.91666667]), sd=array([[ 1.41224801]]), + t=array([[ 2.0652652]]), p=array([[ 0.04690139]]), df_denom=5> >>> print(results.f_test([0, 1])) <F test: F=array([[ 0.12733784]]), p=[[ 0.73577409]], df_denom=5, df_num=1> """ % {'params': 
base._model_params_doc, @@ -907,7 +908,8 @@ class OLS(WLS): ============================================================================== >>> print(results.f_test(np.identity(2))) - <F test: F=array([[159.63031026]]), p=1.2607168903696672e-20, df_denom=43, df_num=2> + <F test: F=array([[159.63031026]]), p=1.2607168903696672e-20, + df_denom=43, df_num=2> """ % {'params': base._model_params_doc, 'extra_params': base._missing_param_doc + base._extra_param_doc} @@ -1257,9 +1259,11 @@ class GLSAR(GLS): >>> results.tvalues array([ -2.10304127, 21.8047269 ]) >>> print(results.t_test([1, 0])) - <T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3> + <T test: effect=array([-0.66661205]), sd=array([[ 0.31697526]]), + t=array([[-2.10304127]]), p=array([[ 0.06309969]]), df_denom=3> >>> print(results.f_test(np.identity(2))) - <F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], df_denom=3, df_num=2> + <F test: F=array([[ 1815.23061844]]), p=[[ 0.00002372]], + df_denom=3, df_num=2> Or, equivalently @@ -1488,7 +1492,10 @@ def yule_walker(x, order=1, method="adjusted", df=None, inv=False, raise sigmasq = r[0] - (r[1:]*rho).sum() - sigma = np.sqrt(sigmasq) if not np.isnan(sigmasq) and sigmasq > 0 else np.nan + if not np.isnan(sigmasq) and sigmasq > 0: + sigma = np.sqrt(sigmasq) + else: + sigma = np.nan if inv: return rho, sigma, np.linalg.inv(R) else: @@ -2637,9 +2644,13 @@ def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwargs): raise ValueError('either time or groups needs to be given') groupidx = lzip([0] + tt, tt + [nobs_]) self.n_groups = n_groups = len(groupidx) - res.cov_params_default = sw.cov_nw_panel(self, maxlags, groupidx, - weights_func=weights_func, - use_correction=use_correction) + res.cov_params_default = sw.cov_nw_panel( + self, + maxlags, + groupidx, + weights_func=weights_func, + use_correction=use_correction + ) res.cov_kwds['description'] = descriptions['HAC-Panel'] 
elif cov_type.lower() == 'hac-groupsum': @@ -2777,7 +2788,8 @@ def summary( diagn_left = diagn_right = [] top_left = [elem for elem in top_left if elem[0] in slimlist] top_right = [elem for elem in top_right if elem[0] in slimlist] - top_right = top_right + [("",[])] * (len(top_left) - len(top_right)) + top_right = top_right + \ + [("", [])] * (len(top_left) - len(top_right)) else: diagn_left = [('Omnibus:', ["%#6.3f" % omni]), ('Prob(Omnibus):', ["%#6.3f" % omnipv]), @@ -2893,7 +2905,6 @@ def summary2( dw = durbin_watson(self.wresid) eigvals = self.eigenvals condno = self.condition_number - eigvals = np.sort(eigvals) # in increasing order diagnostic = dict([ ('Omnibus:', "%.3f" % omni), ('Prob(Omnibus):', "%.3f" % omnipv), @@ -2912,16 +2923,38 @@ def summary2( xname=xname, yname=yname, title=title) smry.add_dict(diagnostic) + etext = [] + + if not self.k_constant: + etext.append( + "RΒ² is computed without centering (uncentered) since the \ + model does not contain a constant." + ) + if hasattr(self, 'cov_type'): + etext.append(self.cov_kwds['description']) + if self.model.exog.shape[0] < self.model.exog.shape[1]: + wstr = "The input rank is higher than the number of observations." + etext.append(wstr) + # Warnings if eigvals[-1] < 1e-10: warn = "The smallest eigenvalue is %6.3g. This might indicate that\ - there are strong multicollinearity problems or that the design\ - matrix is singular." % eigvals[-1] - smry.add_text(warn) - if condno > 1000: - warn = "* The condition number is large (%.g). This might indicate \ - strong multicollinearity or other numerical problems." % condno - smry.add_text(warn) + there are strong multicollinearity problems or that the design\ + matrix is singular." % eigvals[-1] + etext.append(warn) + elif condno > 1000: + warn = "The condition number is large, %6.3g. This might indicate\ + that there are strong multicollinearity or other numerical\ + problems." 
% condno + etext.append(warn) + + if etext: + etext = ["[{0}] {1}".format(i + 1, text) + for i, text in enumerate(etext)] + etext.insert(0, "Notes:") + + for line in etext: + smry.add_text(line) return smry
diff --git a/statsmodels/iolib/tests/test_summary2.py b/statsmodels/iolib/tests/test_summary2.py --- a/statsmodels/iolib/tests/test_summary2.py +++ b/statsmodels/iolib/tests/test_summary2.py @@ -33,6 +33,8 @@ def test_summarycol(self): \end{tabular} \end{center} \end{table} +\bigskip +Standard errors in parentheses. ''' x = [1, 5, 7, 3, 5] x = add_constant(x) @@ -135,6 +137,8 @@ def test__repr_latex_(self): \end{tabular} \end{center} \end{table} +\bigskip +Standard errors in parentheses. ''' x = [1, 5, 7, 3, 5] x = add_constant(x)
BUG: summary2 output does not include extra_text random OLS model ``` model1.summary2().extra_txt [] model1.summary().extra_txt 'Notes:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.' ``` If there is an extra text as in `summary_col`, then it is not included in the html output, but is included in print output. https://github.com/statsmodels/statsmodels/pull/8658#issuecomment-1426013984
This looks like an issue of `RegressionResults`, a class in `regression.linear_model`. The method `summary2` creates an instance of `iolib.summary2.Summary` but does not provide the variable `etext` that is provided when the analogous `summary.Summary` is called by `summary`. I guess that `summary2.Summary` is worth fixing anyway. But I'm wondering how `summary2` (method) ended up in `RegressionResults`. I thought `iolib.summary2` was being used to compare the results of multiple regressions.
"2023-02-10T23:17:33Z"
0.13
[ "statsmodels/iolib/tests/test_summary2.py::TestSummaryLabels::test_summary_col_r2", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLabels::test_absence_of_r2", "statsmodels/iolib/tests/test_summary2.py::test_ols_summary_rsquared_label", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_summary_col_ordering_preserved", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_summarycol_drop_omitted", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_summarycol_float_format", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_OLSsummary" ]
[ "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test_summarycol", "statsmodels/iolib/tests/test_summary2.py::TestSummaryLatex::test__repr_latex_" ]
Python
[]
[]
statsmodels/statsmodels
8,669
statsmodels__statsmodels-8669
[ "8665" ]
ef9cf3ad5bf73c84027421ff5595647f0fefb32b
diff --git a/statsmodels/discrete/discrete_model.py b/statsmodels/discrete/discrete_model.py --- a/statsmodels/discrete/discrete_model.py +++ b/statsmodels/discrete/discrete_model.py @@ -213,7 +213,7 @@ def pdf(self, X): def _check_perfect_pred(self, params, *args): endog = self.endog - fittedvalues = self.cdf(np.dot(self.exog, params[:self.exog.shape[1]])) + fittedvalues = self.predict(params) if np.allclose(fittedvalues - endog, 0): if self.raise_on_perfect_prediction: # backwards compatibility for attr raise_on_perfect_prediction @@ -770,7 +770,10 @@ def fit(self, start_params=None, method='newton', maxiter=35, start_params = np.zeros((self.K * (self.J-1))) else: start_params = np.asarray(start_params) - callback = lambda x : None # placeholder until check_perfect_pred + + if callback is None: + # placeholder until check_perfect_pred + callback = lambda x, *args : None # skip calling super to handle results from LikelihoodModel mnfit = base.LikelihoodModel.fit(self, start_params = start_params, method=method, maxiter=maxiter, full_output=full_output,
diff --git a/statsmodels/discrete/tests/test_discrete.py b/statsmodels/discrete/tests/test_discrete.py --- a/statsmodels/discrete/tests/test_discrete.py +++ b/statsmodels/discrete/tests/test_discrete.py @@ -1552,6 +1552,19 @@ def setup_class(cls): cls.res2 = res2 +def test_mnlogit_basinhopping(): + def callb(*args): + return 1 + + x = np.random.randint(0, 100, 1000) + y = np.random.randint(0, 3, 1000) + model = MNLogit(y, sm.add_constant(x)) + # smoke tests for basinhopping and callback #8665 + model.fit(method='basinhopping') + model.fit(method='basinhopping', callback=callb) + + + def test_perfect_prediction(): cur_dir = os.path.dirname(os.path.abspath(__file__)) iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results')
BUG: MNLogit fit breaks with basinhopping optimizer because of callback https://stackoverflow.com/questions/75417415/basinhopping-causes-error-with-statsmodels-mnlogit I can replicate the example Problem is that callback is defined in the MNLogit.fit function and the keyword argument is ignored. That callback is inconsistent with basinhopping which calls callback with 3 args ``` 703 # BasinHoppingRunner, so run the callback 704 if callable(callback): --> 705 callback(bh.storage.minres.x, bh.storage.minres.fun, True) 706 707 # start main iteration loop TypeError: <lambda>() takes 1 positional argument but 3 were given ``` We cannot override the hardcoded callback using the fit keyword. Aside: MNLogit.fit does not call super, it calls the base model directly `mnfit = base.LikelihoodModel.fit(self, ...`
minimal fix is to change in fit method ``` if callback is None: callback = lambda *x : None ``` But I guess we don't need the callback if we don't call `super().fit` method of either Binary or Discrete Model. The underlying problem is that scipy.optimize fmin_bfgs (and I guess others) has 1 arg in callback, while basinhopping has 3 args in callback. The first arg is params `x` in both. I guess (partial) separation for MNLogit would be if we can perfectly predict at least one level/choice of endog. `DiscreteModel._check_perfect_pred` would need to check along 0 axis instead of `if np.allclose(fittedvalues - endog, 0)` np.allclose does not have an axis keyword option, so we need `max(abs(resid), axis=0)` directly for some atol. or instead we want to check that the prediction/fittedvalues in one column is close to 1 I guess, we also need to check the reference category (This cannot be included in a quick bugfix because we need to verify, unit test the separation case, check with other packages)
"2023-02-13T16:37:36Z"
0.13
[ "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroMNLogit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llf", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_cov_params", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bse", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor_categorical", "statsmodels/discrete/tests/test_discrete.py::test_isdummy", "statsmodels/discrete/tests/test_discrete.py::test_poisson_newton", "statsmodels/discrete/tests/test_discrete.py::test_poisson_predict", "statsmodels/discrete/tests/test_discrete.py::test_issue_339", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor", "statsmodels/discrete/tests/test_discrete.py::test_issue_341", "statsmodels/discrete/tests/test_discrete.py::test_non_binary", "statsmodels/discrete/tests/test_discrete.py::test_formula_missing_exposure", "statsmodels/discrete/tests/test_discrete.py::test_predict_with_exposure", "statsmodels/discrete/tests/test_discrete.py::test_binary_pred_table_zeros", "statsmodels/discrete/tests/test_discrete.py::test_perfect_prediction", 
"statsmodels/discrete/tests/test_discrete.py::test_negative_binomial_default_alpha_param", "statsmodels/discrete/tests/test_discrete.py::test_iscount", "statsmodels/discrete/tests/test_discrete.py::test_cov_confint_pandas", "statsmodels/discrete/tests/test_discrete.py::test_t_test", "statsmodels/discrete/tests/test_discrete.py::test_optim_kwds_prelim", "statsmodels/discrete/tests/test_discrete.py::test_unchanging_degrees_of_freedom", "statsmodels/discrete/tests/test_discrete.py::test_null_options", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_float_name", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_predict_prob", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_newton", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_basic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_mean_var", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_wald", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_t", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_jac", 
"statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_score", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_hessian", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_fit_regularized", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_init_kwds", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_t", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_distr", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p1", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p2", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_jac", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestSweepAlphaL1::test_sweep_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoissonNull::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_nnz_params", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llnull", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmean", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNull::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_loglikeobs", 
"statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_prob", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_dummy_overall", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_tests", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_generalized", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmedian", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llf", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_converged", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_tests", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_dev", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_mean", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_k", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_init_kwds", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_loglikeobs", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_init_kwargs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_conf_int", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_df", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_summary_latex", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_f_test", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bse", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_mean", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_resid", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_k", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_diagnostic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmean", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexoverall", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_aic" ]
[ "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_basinhopping" ]
Python
[]
[]
statsmodels/statsmodels
8,674
statsmodels__statsmodels-8674
[ "8672" ]
d4fee3c81029f10d24c9b021e5df01c5f0a445dd
diff --git a/statsmodels/discrete/discrete_model.py b/statsmodels/discrete/discrete_model.py --- a/statsmodels/discrete/discrete_model.py +++ b/statsmodels/discrete/discrete_model.py @@ -136,11 +136,13 @@ def _pandas_to_dummies(endog): if endog.shape[1] == 1: yname = endog.columns[0] endog_dummies = get_dummies(endog.iloc[:, 0]) - else: # series + else: # assume already dummies yname = 'y' endog_dummies = endog else: yname = endog.name + if yname is None: + yname = 'y' endog_dummies = get_dummies(endog) ynames = endog_dummies.columns.tolist()
diff --git a/statsmodels/discrete/tests/test_discrete.py b/statsmodels/discrete/tests/test_discrete.py --- a/statsmodels/discrete/tests/test_discrete.py +++ b/statsmodels/discrete/tests/test_discrete.py @@ -1706,6 +1706,10 @@ def test_mnlogit_factor(): params = res.params summary = res.summary() predicted = res.predict(exog.iloc[:5, :]) + # check endog is series with no name #8672 + endogn = dta['endog'] + endogn.name = None + mod = sm.MNLogit(endogn, exog) # with patsy mod = smf.mnlogit('PID ~ ' + ' + '.join(dta.exog.columns), dta.data)
BUG: Init of MNLogit fails if endogenous variable is unnamed Pandas Series #### Describe the bug MNLogit throws a "TypeError: MultiIndex.name must be a hashable type" if the provided endogenous variable is a pandas Series whose `name` attribute is not set. The resulting stack trace is quite cryptic which makes it very hard for users to figure out what is going on. Workaround: Make sure the Pandas Series is named. #### Code Sample, a copy-pastable example if possible ```python import pandas as pd import statsmodels.api as sm # This works fine X = pd.DataFrame(data={"var1": [.1, .2, .3, .4, ], "var2": [.0, .1, .2, .3,]}, index = range(4)) y = pd.Series([2, 2, 1, 0, ], index=range(4), name="output") sm.MNLogit(endog=y, exog=X).fit() # This fails X = pd.DataFrame(data={"var1": [.1, .2, .3, .4, ], "var2": [.0, .1, .2, .3,]}, index = range(4)) y = pd.Series([2, 2, 1, 0, ], index=range(4)) # <--- name attribute not set and defaults to None sm.MNLogit(endog=y, exog=X).fit() ``` <details> Here is the resulting stack trace. The issue is in happening [here](https://github.com/statsmodels/statsmodels/blob/ba8a0076ef9f464f0b7b3b432fdfad125dcdb12f/statsmodels/discrete/discrete_model.py#L2947). If `y.name` is None, the `yname` variable in the initializer takes a list as value for some reason. In consequence, the init of the Pandas Multiindex fails. 
``` File "/Users/sven/.../statsmodels_mwe.py", line 13, in <module> sm.MNLogit(endog=y, exog=X).fit() File "/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/statsmodels/discrete/discrete_model.py", line 2270, in __init__ idx = MultiIndex.from_product((ynames[1:], self.data.xnames), File "/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/pandas/core/indexes/multi.py", line 644, in from_product return cls(levels, codes, sortorder=sortorder, names=names) File "/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/pandas/core/indexes/multi.py", line 334, in __new__ result._set_names(names) File "/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/pandas/core/indexes/multi.py", line 1489, in _set_names raise TypeError( TypeError: MultiIndex.name must be a hashable type ``` </details> ~~If the issue has not been resolved, please file it in the issue tracker.~~ Don't know how to do that. #### Expected Output ... it should not throw an error. 
#### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.8.16.final.0 OS: Darwin 20.6.0 Darwin Kernel Version 20.6.0: Thu Sep 29 20:15:11 PDT 2022; root:xnu-7195.141.42~1/RELEASE_X86_64 x86_64 byteorder: little LC_ALL: None LANG: None statsmodels =========== Installed: 0.13.5 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.24.1 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/numpy) scipy: 1.10.0 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/scipy) pandas: 1.5.3 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/pandas) dateutil: 2.8.2 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/dateutil) patsy: 0.5.3 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.6.3 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/matplotlib) Backend MacOSX is interactive backend. Turning interactive mode on. backend: MacOSX cvxopt: Not installed joblib: 1.2.0 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/joblib) Developer Tools ================ IPython: 8.8.0 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/IPython) jinja2: 3.1.2 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/jinja2) sphinx: Not installed pygments: 2.14.0 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/pygments) pytest: 7.2.1 (/Users/sven/miniconda3/envs/benjamin/lib/python3.8/site-packages/pytest) virtualenv: Not installed </details>
Thanks for reporting this. It's a bug. But I don't know why it didn't show up in the past. That code should be unchanged for some time. My guess is that somewhere in the datahandling there is no default yname "y" if there is no name provided. This should be MNLogit or MultinomialModel specifc because models like OLS default to "y" as generic default name. If endog is a numpy array, it defaults to "y" as endog_names The relevant code is in discrete_model `_pandas_to_dummies` function which is called by `MultinomialModel._handle_data` `get_dummies` is a pandas function, so we need to check how pandas creates default column names from a series with no name. @svenbuechel Thanks for reporting the bug I will fix it today
"2023-02-15T16:16:01Z"
0.13
[ "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_pred_table", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDogleg::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_conf_int", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoissonNull::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llr", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_init_kwds", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_jac", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1BFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_predict", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitCG::test_params", "statsmodels/discrete/tests/test_discrete.py::test_non_binary", "statsmodels/discrete/tests/test_discrete.py::test_iscount", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor_categorical", "statsmodels/discrete/tests/test_discrete.py::test_isdummy", "statsmodels/discrete/tests/test_discrete.py::test_poisson_predict", "statsmodels/discrete/tests/test_discrete.py::test_issue_339", "statsmodels/discrete/tests/test_discrete.py::test_negative_binomial_default_alpha_param", "statsmodels/discrete/tests/test_discrete.py::test_formula_missing_exposure", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_basinhopping", "statsmodels/discrete/tests/test_discrete.py::test_poisson_newton", "statsmodels/discrete/tests/test_discrete.py::test_perfect_prediction", "statsmodels/discrete/tests/test_discrete.py::test_predict_with_exposure", "statsmodels/discrete/tests/test_discrete.py::test_binary_pred_table_zeros", "statsmodels/discrete/tests/test_discrete.py::test_issue_341", "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_float_name", "statsmodels/discrete/tests/test_discrete.py::test_t_test", "statsmodels/discrete/tests/test_discrete.py::test_optim_kwds_prelim", "statsmodels/discrete/tests/test_discrete.py::test_null_options", "statsmodels/discrete/tests/test_discrete.py::test_unchanging_degrees_of_freedom", "statsmodels/discrete/tests/test_discrete.py::test_cov_confint_pandas", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_k", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_margeff_mean", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitLBFGSBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_loglikeobs", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBasinhopping::test_aic", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitBFGS::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNM::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_alpha", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestPoissonL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_exog2", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewtonPrepend::test_nodummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_params", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bic", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_lnalpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB1Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_endog_names", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_k", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_mean", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_j", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_zstat", 
"statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_margeff_dummy", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitNewtonBaseZero::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_tests", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroLogit::test_converged", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llf", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeAdditionalOptions::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_diagnostic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmedian", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_conf_int", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_pearson", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog1", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_exog2", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestLogitNewton::test_pvalues", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNCG::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeometricBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr_pvalue", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fittedvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPNB2Newton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNB2Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bic", 
"statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_overall", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_prob", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_resid", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_margeff_dummy_overall", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNewton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_generalized", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_eydxoverall", 
"statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_dummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_count_dummy_dydxmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dyexmean", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_dydxoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexoverall", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eydxzero", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_nodummy_eyexmedian", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestLogitBFGS::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_loglikeobs", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_init_kwargs", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitNewton::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_mean_var", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_predict_prob", 
"statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_basic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_newton", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_underdispersion::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_df", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_wald", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_t", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p2::test_alpha", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_predict_xb", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitPowell::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPL1Compatability::test_t_test", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_tests", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroProbit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p1", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialPPredictProb::test_predict_prob_p2", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestMNLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_t", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_init_kwds", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_score", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_hessian", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_p1::test_fit_regularized", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_summary_latex", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_response", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict_xb", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_zstat", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_predict", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_dev", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_dof", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llr_pvalue", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_loglikeobs", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pred_table", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_distr", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_jac", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_fit_regularized_invalid_method", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_llf", 
"statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_resid_generalized", "statsmodels/discrete/tests/test_discrete.py::TestProbitMinimizeDefault::test_pvalues", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialGeoL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestProbitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestL1AlphaZeroMNLogit::test_basic_results", "statsmodels/discrete/tests/test_discrete.py::TestPoissonNull::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_nnz_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bse", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1::test_bic", 
"statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_llnull", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialNBP1Null::test_start_null", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestLogitL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_cov_params", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_df", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_bad_r_matrix", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_f_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_t_test", "statsmodels/discrete/tests/test_discrete.py::TestNegativeBinomialL1Compatability::test_params", "statsmodels/discrete/tests/test_discrete.py::TestSweepAlphaL1::test_sweep_alpha", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_df", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_conf_int", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_params", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_aic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_llf", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bse", 
"statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_bic", "statsmodels/discrete/tests/test_discrete.py::TestGeneralizedPoisson_transparams::test_alpha" ]
[ "statsmodels/discrete/tests/test_discrete.py::test_mnlogit_factor" ]
Python
[]
[]
statsmodels/statsmodels
8,702
statsmodels__statsmodels-8702
[ "8704" ]
27b28335f5921accbbda5f8ea6f0bc1768db282a
diff --git a/statsmodels/regression/linear_model.py b/statsmodels/regression/linear_model.py --- a/statsmodels/regression/linear_model.py +++ b/statsmodels/regression/linear_model.py @@ -2429,7 +2429,7 @@ def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwargs): - 'HAC': heteroskedasticity-autocorrelation robust covariance - ``maxlag`` : integer, required + ``maxlags`` : integer, required number of lags to use ``kernel`` : {callable, str}, optional @@ -2471,7 +2471,7 @@ def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwargs): ``time`` : array_like, required index of time periods - ``maxlag`` : integer, required + ``maxlags`` : integer, required number of lags to use ``kernel`` : {callable, str}, optional The available kernels are ['bartlett', 'uniform']. The default is @@ -2495,7 +2495,7 @@ def get_robustcov_results(self, cov_type='HC1', use_t=None, **kwargs): indicator for groups ``time`` : array_like[int] index of time periods - ``maxlag`` : int, required + ``maxlags`` : int, required number of lags to use ``kernel`` : {callable, str}, optional Available kernels are ['bartlett', 'uniform'], default diff --git a/statsmodels/stats/proportion.py b/statsmodels/stats/proportion.py --- a/statsmodels/stats/proportion.py +++ b/statsmodels/stats/proportion.py @@ -115,11 +115,12 @@ def proportion_confint(count, nobs, alpha:float=0.05, method="normal"): Parameters ---------- - count : {int, array_like} + count : {int or float, array_like} number of successes, can be pandas Series or DataFrame. Arrays - must contain integer values. - nobs : {int, array_like} - total number of trials. Arrays must contain integer values. + must contain integer values if method is "binom_test". + nobs : {int or float, array_like} + total number of trials. Arrays must contain integer values if method + is "binom_test". alpha : float Significance level, default 0.05. 
Must be in (0, 1) method : {"normal", "agresti_coull", "beta", "wilson", "binom_test"} @@ -183,8 +184,9 @@ def _check(x: np.ndarray, name: str) -> np.ndarray: ) return y - count_a = _check(np.asarray(count_a), "count") - nobs_a = _check(np.asarray(nobs_a), "count") + if method == "binom_test": + count_a = _check(np.asarray(count_a), "count") + nobs_a = _check(np.asarray(nobs_a), "count") q_ = count_a / nobs_a alpha_2 = 0.5 * alpha @@ -1541,7 +1543,6 @@ def score_test_proportions_2indep(count1, nobs1, count2, nobs2, value=None, prop0 = 2 * p * np.cos(a) - tmp2 / (3 * tmp3) prop1 = prop0 + diff - correction = True var = prop1 * (1 - prop1) / nobs1 + prop0 * (1 - prop0) / nobs0 if correction: var *= nobs / (nobs - 1)
diff --git a/statsmodels/stats/tests/test_proportion.py b/statsmodels/stats/tests/test_proportion.py --- a/statsmodels/stats/tests/test_proportion.py +++ b/statsmodels/stats/tests/test_proportion.py @@ -98,6 +98,12 @@ def test_confint_proportion_ndim(method): method=method) assert_allclose((ci_arr2[0][1, 2], ci_arr[1][1, 2]), ci12, rtol=1e-13) + # check floating point values + ci_arr2 = proportion_confint(count + 1e-4, nobs[1, 2], alpha=0.05, + method=method) + # should be close to values with integer values + assert_allclose((ci_arr2[0][1, 2], ci_arr[1][1, 2]), ci12, rtol=1e-4) + def test_samplesize_confidenceinterval_prop(): #consistency test for samplesize to achieve confidence_interval @@ -693,7 +699,7 @@ def test_confint_2indep_propcis(): ci = 0.0270416, 0.3452912 ci1 = confint_proportions_2indep(count1, nobs1, count2, nobs2, compare="diff", - method="score", correction=False) + method="score", correction=True) assert_allclose(ci1, ci, atol=0.002) # lower agreement (iterative) # > wald2ci(7, 34, 1, 34, 0.95, adjust="AC") ci = 0.01161167, 0.32172166 @@ -966,12 +972,14 @@ def test_ci_symmetry_binom_test(nobs, count, array_like): def test_int_check(): + # integer values are required only if method="binom_test" with pytest.raises(ValueError): - proportion_confint(10.5, 20) + proportion_confint(10.5, 20, method="binom_test") with pytest.raises(ValueError): - proportion_confint(10, 20.5) + proportion_confint(10, 20.5, method="binom_test") with pytest.raises(ValueError): - proportion_confint(np.array([10.3]), 20) + proportion_confint(np.array([10.3]), 20, method="binom_test") + a = proportion_confint(21.0, 47, method="binom_test") b = proportion_confint(21, 47, method="binom_test") c = proportion_confint(21, 47.0, method="binom_test")
BUG: test_proportions_2indep, hard coded correction=True for score diff test Why is this hard coded and does not use to `correction` option? `correction = True` inside the path for score method for default compare="diff" Is this a bug or intentional? I don't remember
"2023-02-24T21:37:17Z"
0.13
[ "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case4]", "statsmodels/stats/tests/test_proportion.py::test_multinomial_proportions_errors", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case6]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case7]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case0]", "statsmodels/stats/tests/test_proportion.py::test_proportion_effect_size", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case5]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case5]", "statsmodels/stats/tests/test_proportion.py::test_samplesize_confidenceinterval_prop", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[jeffreys-case0]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case2]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case4]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case1]", "statsmodels/stats/tests/test_proportion.py::test_confint_multinomial_proportions", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case3]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case1]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case2]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case2]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case2]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case5]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case7]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[jeffreys-case1]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case4]", 
"statsmodels/stats/tests/test_proportion.py::test_confint_proportion[jeffreys-case2]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case6]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case5]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case6]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[beta-case6]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case4]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case1]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case0]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case1]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case3]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case0]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case3]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case7]", "statsmodels/stats/tests/test_proportion.py::test_confint_multinomial_proportions_zeros", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[normal-case7]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[agresti_coull-case3]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion[wilson-case0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count23-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count37-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count41-50]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count11-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count40-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count27-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count20-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count30-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count10-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count23-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count41-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count28-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count2-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count40-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count15-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count11]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count10-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count24-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count31-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count43-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count12-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count29-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count4-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count4]", "statsmodels/stats/tests/test_proportion.py::test_power_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count4-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count7-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count1-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count25-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count33-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count14-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count10]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count45-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count15-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count19-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count29-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count15-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count25-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count14]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count17-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count17-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count15]", "statsmodels/stats/tests/test_proportion.py::test_power_binom_tost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count19-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count14-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count1-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count19-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count28-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count15]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count44-47]", "statsmodels/stats/tests/test_proportion.py::test_power_ztost_prop", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count25-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count20-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count1-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count34-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count38-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count47-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count0]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count2-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count44-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count35-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count13-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count16-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count8-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count39-50]", "statsmodels/stats/tests/test_proportion.py::test_confint_2indep_propcis", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count14-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count20-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count46-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count13]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count6-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count30-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count43-50]", "statsmodels/stats/tests/test_proportion.py::test_binom_rejection_interval", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count31-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count8-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count2-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count21-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count29-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count26-50]", "statsmodels/stats/tests/test_proportion.py::test_score_test_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count0-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count4]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count16-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count45-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count37-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count46-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count6-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count6-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count46-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count7-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count47-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count5-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count37-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count2]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count32-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count39-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count11]", "statsmodels/stats/tests/test_proportion.py::test_test_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count40-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count12-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count32-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count18-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count41-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count9]", "statsmodels/stats/tests/test_proportion.py::test_proportion_ztests", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count20-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count46-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count3-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count14-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count38-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count17-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count15-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count12-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count0-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count10-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count9-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count3-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count5-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count5-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count13-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count44-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count18-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count15]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count27-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count27-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count39-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count11-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count11-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count43-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count16-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count6-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count42-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count2]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count21-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count28-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count23-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count38-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count9]", "statsmodels/stats/tests/test_proportion.py::test_score_confint_koopman_nam", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count8-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count45-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count21-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count4-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count34-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count0-50]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count4-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count10-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count35-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count32-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count18-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count26-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count30-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count36-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count9-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count24-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count8]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count17-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count41-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count0-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count13-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count22-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count13-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count47-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count25-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count3-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count1]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count8-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count16-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count5-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count11-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count3]", "statsmodels/stats/tests/test_proportion.py::test_confint_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count23-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count7-50]", "statsmodels/stats/tests/test_proportion.py::test_binom_test", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count22-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count6]", "statsmodels/stats/tests/test_proportion.py::test_int_check", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count1-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count2-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count36-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count14]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count3-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count42-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count32-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count26-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count33-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count38-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count24-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count19-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count18-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count7-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count8]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count27-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count24-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count22-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count15]", "statsmodels/stats/tests/test_proportion.py::test_ztost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count12-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count26-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count42-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[jeffreys-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-binom_test-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count22-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count31-47]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count29-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-wilson-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-normal-count15]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count40-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count9-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count10]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count12]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count28-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count47-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count9-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count12]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count44-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count42-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count31-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count6]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count34-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count37-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count30-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-beta-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count33-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count11]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count13]", "statsmodels/stats/tests/test_proportion.py::test_equivalence_2indep", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count14]", "statsmodels/stats/tests/test_proportion.py::test_binom_tost", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-jeffreys-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count36-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count12]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-beta-count0]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count35-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-wilson-count13]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count4]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[wilson-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count35-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count39-50]", "statsmodels/stats/tests/test_proportion.py::test_power_ztost_prop_norm", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count36-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[agresti_coull-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-agresti_coull-count7]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[normal-count1]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count34-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-jeffreys-count14]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count5]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[binom_test-count3]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[False-normal-count8]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[False-count21-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count33-50]", 
"statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-binom_test-count2]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry[True-agresti_coull-count9]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count45-47]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_binom_test[True-count43-50]", "statsmodels/stats/tests/test_proportion.py::test_ci_symmetry_array[beta-count10]", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_default_values", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_number_pairs_1493", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_pairwiseproptest", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_scalar", "statsmodels/stats/tests/test_proportion.py::TestProportion::test_proptest" ]
[ "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[jeffreys]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[wilson]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[beta]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[agresti_coull]", "statsmodels/stats/tests/test_proportion.py::test_confint_proportion_ndim[normal]" ]
Python
[]
[]
statsmodels/statsmodels
8,735
statsmodels__statsmodels-8735
[ "8726", "8729" ]
a948e8dee9e99a08f249f67ca2ac5df68fea835b
diff --git a/statsmodels/base/model.py b/statsmodels/base/model.py --- a/statsmodels/base/model.py +++ b/statsmodels/base/model.py @@ -1097,6 +1097,7 @@ def _transform_predict_exog(self, exog, transform=True): else: # assume we need a row exog = pd.DataFrame(exog).T + exog_index = exog.index orig_exog_len = len(exog) is_dict = isinstance(exog, dict) try: diff --git a/statsmodels/base/optimizer.py b/statsmodels/base/optimizer.py --- a/statsmodels/base/optimizer.py +++ b/statsmodels/base/optimizer.py @@ -428,8 +428,9 @@ def _fit_newton(f, score, start_params, fargs, kwargs, disp=True, information returned from the solver used. If it is False, this is None. """ - check_kwargs(kwargs, ("tol",), "newton") + check_kwargs(kwargs, ("tol", "ridge_factor"), "newton") tol = kwargs.setdefault('tol', 1e-8) + ridge_factor = kwargs.setdefault('ridge_factor', 1e-10) iterations = 0 oldparams = np.inf newparams = np.asarray(start_params) diff --git a/statsmodels/discrete/count_model.py b/statsmodels/discrete/count_model.py --- a/statsmodels/discrete/count_model.py +++ b/statsmodels/discrete/count_model.py @@ -684,7 +684,9 @@ def _predict_var(self, params, mu, prob_infl): return var_ def _get_start_params(self): - start_params = self.model_main.fit(disp=0, method="nm").params + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=ConvergenceWarning) + start_params = self.model_main.fit(disp=0, method="nm").params start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params) return start_params diff --git a/statsmodels/discrete/truncated_model.py b/statsmodels/discrete/truncated_model.py --- a/statsmodels/discrete/truncated_model.py +++ b/statsmodels/discrete/truncated_model.py @@ -3,6 +3,7 @@ __all__ = ["TruncatedLFPoisson", "TruncatedLFNegativeBinomialP", "HurdleCountModel"] +import warnings import numpy as np import statsmodels.base.model as base import statsmodels.base.wrapper as wrap @@ -23,6 +24,7 @@ ) from statsmodels.tools.numdiff import 
approx_hess from statsmodels.tools.decorators import cache_readonly +from statsmodels.tools.sm_exceptions import ConvergenceWarning from copy import deepcopy @@ -184,7 +186,9 @@ def fit(self, start_params=None, method='bfgs', maxiter=35, offset = None model = self.model_main.__class__(self.endog, self.exog, offset=offset) - start_params = model.fit(disp=0).params + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=ConvergenceWarning) + start_params = model.fit(disp=0).params # Todo: check how we can to this in __init__ k_params = self.df_model + 1 + self.k_extra @@ -748,7 +752,9 @@ def fit(self, start_params=None, method='bfgs', maxiter=35, offset = None model = self.model_main.__class__(self.endog, self.exog, offset=offset) - start_params = model.fit(disp=0).params + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=ConvergenceWarning) + start_params = model.fit(disp=0).params mlefit = super(_RCensoredGeneric, self).fit( start_params=start_params, method=method, diff --git a/statsmodels/multivariate/manova.py b/statsmodels/multivariate/manova.py --- a/statsmodels/multivariate/manova.py +++ b/statsmodels/multivariate/manova.py @@ -71,13 +71,19 @@ def fit(self): 'mv_test directly on a MANOVA instance.') @Substitution(hypotheses_doc=_hypotheses_doc) - def mv_test(self, hypotheses=None): + def mv_test(self, hypotheses=None, skip_intercept_test=False): """ Linear hypotheses testing Parameters ---------- %(hypotheses_doc)s + skip_intercept_test : bool + If true, then testing the intercept is skipped, the model is not + changed. + Note: If a term has a numerically insignificant effect, then + an exception because of emtpy arrays may be raised. This can + happen for the intercept if the data has been demeaned. 
Returns ------- @@ -104,6 +110,8 @@ def mv_test(self, hypotheses=None): terms = self.data.design_info.term_name_slices hypotheses = [] for key in terms: + if skip_intercept_test and key == 'Intercept': + continue L_contrast = np.eye(self.exog.shape[1])[terms[key], :] hypotheses.append([key, L_contrast, None]) else: diff --git a/statsmodels/multivariate/multivariate_ols.py b/statsmodels/multivariate/multivariate_ols.py --- a/statsmodels/multivariate/multivariate_ols.py +++ b/statsmodels/multivariate/multivariate_ols.py @@ -423,13 +423,19 @@ def __str__(self): return self.summary().__str__() @Substitution(hypotheses_doc=_hypotheses_doc) - def mv_test(self, hypotheses=None): + def mv_test(self, hypotheses=None, skip_intercept_test=False): """ Linear hypotheses testing Parameters ---------- %(hypotheses_doc)s + skip_intercept_test : bool + If true, then testing the intercept is skipped, the model is not + changed. + Note: If a term has a numerically insignificant effect, then + an exception because of emtpy arrays may be raised. This can + happen for the intercept if the data has been demeaned. Returns ------- @@ -451,6 +457,8 @@ def mv_test(self, hypotheses=None): terms = self.design_info.term_name_slices hypotheses = [] for key in terms: + if skip_intercept_test and key == 'Intercept': + continue L_contrast = np.eye(k_xvar)[terms[key], :] hypotheses.append([key, L_contrast, None]) else:
diff --git a/statsmodels/multivariate/tests/test_manova.py b/statsmodels/multivariate/tests/test_manova.py --- a/statsmodels/multivariate/tests/test_manova.py +++ b/statsmodels/multivariate/tests/test_manova.py @@ -3,7 +3,7 @@ import numpy as np import pandas as pd import pytest -from numpy.testing import assert_almost_equal, assert_raises +from numpy.testing import assert_almost_equal, assert_raises, assert_allclose from statsmodels.multivariate.manova import MANOVA from statsmodels.multivariate.multivariate_ols import MultivariateTestResults @@ -171,3 +171,29 @@ def test_manova_test_input_validation(): def test_endog_1D_array(): assert_raises(ValueError, MANOVA.from_formula, 'Basal ~ Loc', X) + + +def test_manova_demeaned(): + # see last example in #8713 + # If a term has no effect, all eigenvalues below threshold, then computaion + # raised numpy exception with empty arrays. + # currently we have an option to skip the intercept test, but don't handle + # empty arrays directly + ng = 5 + loc = ["Basal", "Occ", "Max"] * ng + y1 = (np.random.randn(ng, 3) + [0, 0.5, 1]).ravel() + y2 = (np.random.randn(ng, 3) + [0.25, 0.75, 1]).ravel() + y3 = (np.random.randn(ng, 3) + [0.3, 0.6, 1]).ravel() + dta = pd.DataFrame(dict(Loc=loc, Basal=y1, Occ=y2, Max=y3)) + mod = MANOVA.from_formula('Basal + Occ + Max ~ C(Loc, Helmert)', data=dta) + res1 = mod.mv_test() + + # subtract sample means to have insignificant intercept + means = dta[["Basal", "Occ", "Max"]].mean() + dta[["Basal", "Occ", "Max"]] = dta[["Basal", "Occ", "Max"]] - means + mod = MANOVA.from_formula('Basal + Occ + Max ~ C(Loc, Helmert)', data=dta) + res2 = mod.mv_test(skip_intercept_test=True) + + stat1 = res1.results["C(Loc, Helmert)"]["stat"].to_numpy(float) + stat2 = res2.results["C(Loc, Helmert)"]["stat"].to_numpy(float) + assert_allclose(stat1, stat2, rtol=1e-10)
BUG: out-of-sample prediction with nan values get unexpected result This is an OLS regression model from [this page](https://www.statsmodels.org/stable/example_formulas.html): ``` import statsmodels.api as sm import statsmodels.formula.api as smf import numpy as np import pandas as pd # example from https://www.statsmodels.org/stable/example_formulas.html df = sm.datasets.get_rdataset("Guerry", "HistData").data df = df[['Lottery', 'Literacy', 'Wealth', 'Region']].dropna() mod = smf.ols(formula='Lottery ~ Literacy', data=df) res = mod.fit() ``` When this model is fitted, we can use this model to predict: ``` # out of sample prediction without NaN values. This works as expected. res.predict(pd.Series([0,1,2,3], name='Literacy')) ``` But when we have NaN values in the exog data, the `predict()` method returns unexpected result, which is a pandas series with only NaN value indexed by None: ``` # ****out of sample prediction with NaN values. This gives a strange result!**** res.predict(pd.Series([0,1,2,None,3], name='Literacy')) ``` statsmodels: 0.13.5 pandas: 1.4.4 numpy: 1.23.5 python:3.9.16 ENH/BUG: allow fit keyword ridge_factor in newton `model.fit(method="newton", ridge_factor= 1e-5)` issues a warning about unused kwarg, and ridge_factor is not transmitted to the newton optimizer. I wanted to try out fit with a large ridge factor to force convergence (to a possibly regularized solution) I'm not sure whether this ever worked or was a refactoring victim. AFAICS, it's because ridge_factor is explicit keyword and not in `kwargs` in signature, base.optimizer ``` def _fit_newton(f, score, start_params, fargs, kwargs, disp=True, maxiter=100, callback=None, retall=False, full_output=True, hess=None, ridge_factor=1e-10) ```
"2023-03-16T21:31:45Z"
0.13
[ "statsmodels/multivariate/tests/test_manova.py::test_manova_no_formula", "statsmodels/multivariate/tests/test_manova.py::test_manova_no_formula_no_hypothesis", "statsmodels/multivariate/tests/test_manova.py::test_manova_sas_example", "statsmodels/multivariate/tests/test_manova.py::test_manova_test_input_validation", "statsmodels/multivariate/tests/test_manova.py::test_endog_1D_array" ]
[ "statsmodels/multivariate/tests/test_manova.py::test_manova_demeaned" ]
Python
[]
[]
statsmodels/statsmodels
8,780
statsmodels__statsmodels-8780
[ "8779" ]
4273c0d426772e6845de6301b30f6138d6c3e290
diff --git a/statsmodels/discrete/count_model.py b/statsmodels/discrete/count_model.py --- a/statsmodels/discrete/count_model.py +++ b/statsmodels/discrete/count_model.py @@ -727,7 +727,8 @@ def get_distribution(self, params, exog=None, exog_infl=None, w = self.predict(params, exog=exog, exog_infl=exog_infl, exposure=exposure, offset=offset, which="prob-main") - distr = self.distribution(mu[:, None], 1 - w[:, None]) + # distr = self.distribution(mu[:, None], 1 - w[:, None]) + distr = self.distribution(mu, 1 - w) return distr @@ -842,7 +843,8 @@ def get_distribution(self, params, exog=None, exog_infl=None, exposure=exposure, offset=offset, which="mean-main") w = self.predict(params, exog=exog, exog_infl=exog_infl, exposure=exposure, offset=offset, which="prob-main") - distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None]) + # distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None]) + distr = self.distribution(mu, params[-1], p, 1 - w) return distr @@ -958,7 +960,8 @@ def get_distribution(self, params, exog=None, exog_infl=None, w = self.predict(params, exog=exog, exog_infl=exog_infl, exposure=exposure, offset=offset, which="prob-main") - distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None]) + # distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None]) + distr = self.distribution(mu, params[-1], p, 1 - w) return distr diff --git a/statsmodels/discrete/discrete_model.py b/statsmodels/discrete/discrete_model.py --- a/statsmodels/discrete/discrete_model.py +++ b/statsmodels/discrete/discrete_model.py @@ -677,7 +677,8 @@ def get_distribution(self, params, exog=None, offset=None): Instance of frozen scipy distribution. 
""" mu = self.predict(params, exog=exog, offset=offset) - distr = stats.bernoulli(mu[:, None]) + # distr = stats.bernoulli(mu[:, None]) + distr = stats.bernoulli(mu) return distr @@ -1659,7 +1660,7 @@ def predict(self, params, exog=None, exposure=None, offset=None, exposure=exposure, offset=offset, )[:, None] # uses broadcasting - return stats.poisson.pmf(y_values, mu) + return stats.poisson._pmf(y_values, mu) else: raise ValueError('Value of the `which` option is not recognized') @@ -2281,7 +2282,8 @@ def get_distribution(self, params, exog=None, exposure=None, offset=None): """ mu = self.predict(params, exog=exog, exposure=exposure, offset=offset) p = self.parameterization + 1 - distr = genpoisson_p(mu[:, None], params[-1], p) + # distr = genpoisson_p(mu[:, None], params[-1], p) + distr = genpoisson_p(mu, params[-1], p) return distr @@ -3587,8 +3589,13 @@ def predict(self, params, exog=None, exposure=None, offset=None, offset=offset ) if y_values is None: - y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1)) - return distr.pmf(y_values) + y_values = np.arange(0, np.max(self.endog) + 1) + else: + y_values = np.asarray(y_values) + + assert y_values.ndim == 1 + y_values = y_values[..., None] + return distr.pmf(y_values).T exog, offset, exposure = self._get_predict_arrays( exog=exog, @@ -3757,7 +3764,8 @@ def get_distribution(self, params, exog=None, exposure=None, offset=None): """ mu = self.predict(params, exog=exog, exposure=exposure, offset=offset) if self.loglike_method == 'geometric': - distr = stats.geom(1 / (1 + mu[:, None]), loc=-1) + # distr = stats.geom(1 / (1 + mu[:, None]), loc=-1) + distr = stats.geom(1 / (1 + mu), loc=-1) else: if self.loglike_method == 'nb2': p = 2 @@ -3768,7 +3776,8 @@ def get_distribution(self, params, exog=None, exposure=None, offset=None): q = 2 - p size = 1. 
/ alpha * mu**q prob = size / (size + mu) - distr = nbinom(size[:, None], prob[:, None]) + # distr = nbinom(size[:, None], prob[:, None]) + distr = nbinom(size, prob) return distr @@ -4343,7 +4352,8 @@ def get_distribution(self, params, exog=None, exposure=None, offset=None): """ mu = self.predict(params, exog=exog, exposure=exposure, offset=offset) size, prob = self.convert_params(params, mu) - distr = nbinom(size[:, None], prob[:, None]) + # distr = nbinom(size[:, None], prob[:, None]) + distr = nbinom(size, prob) return distr diff --git a/statsmodels/discrete/truncated_model.py b/statsmodels/discrete/truncated_model.py --- a/statsmodels/discrete/truncated_model.py +++ b/statsmodels/discrete/truncated_model.py @@ -383,12 +383,13 @@ def predict(self, params, exog=None, exposure=None, offset=None, return probs elif which == 'prob-base': if y_values is not None: - counts = np.atleast_2d(y_values) + counts = np.asarray(y_values) else: - counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1)) + counts = np.arange(0, np.max(self.endog)+1) + probs = self.model_main.predict( params, exog=exog, exposure=np.exp(exposure), - offset=offset, which="prob", y_values=counts)[:, None] + offset=offset, which="prob", y_values=counts) return probs elif which == 'var': mu = np.exp(linpred) diff --git a/statsmodels/genmod/generalized_linear_model.py b/statsmodels/genmod/generalized_linear_model.py --- a/statsmodels/genmod/generalized_linear_model.py +++ b/statsmodels/genmod/generalized_linear_model.py @@ -2149,6 +2149,68 @@ def get_influence(self, observed=True): hat_matrix_diag=hat_matrix_diag) return infl + def get_distribution(self, exog=None, exposure=None, + offset=None, var_weights=1., n_trials=1.): + """ + Return a instance of the predictive distribution. + + Parameters + ---------- + scale : scalar + The scale parameter. + exog : array_like + The predictor variable matrix. + offset : array_like or None + Offset variable for predicted mean. 
+ exposure : array_like or None + Log(exposure) will be added to the linear prediction. + var_weights : array_like + 1d array of variance (analytic) weights. The default is None. + n_trials : int + Number of trials for the binomial distribution. The default is 1 + which corresponds to a Bernoulli random variable. + + Returns + ------- + gen + Instance of a scipy frozen distribution based on estimated + parameters. + Use the ``rvs`` method to generate random values. + + Notes + ----- + Due to the behavior of ``scipy.stats.distributions objects``, the + returned random number generator must be called with ``gen.rvs(n)`` + where ``n`` is the number of observations in the data set used + to fit the model. If any other value is used for ``n``, misleading + results will be produced. + """ + # Note this is mostly a copy of GLM.get_prediction + # calling here results.predict avoids the exog check and trasnform + + if isinstance(self.model.family, (families.Binomial, families.Poisson, + families.NegativeBinomial)): + # use scale=1, independent of QMLE scale for discrete + scale = 1. 
+ if self.scale != 1.: + msg = "using scale=1, no exess dispersion in distribution" + warnings.warn(msg, UserWarning) + else: + scale = self.scale + + mu = self.predict(exog, exposure, offset, which="mean") + + kwds = {} + if (np.any(n_trials != 1) and + isinstance(self.model.family, families.Binomial)): + + kwds["n_trials"] = n_trials + + distr = self.model.family.get_distribution( + mu, scale, var_weights=var_weights, **kwds) + return distr + + @Appender(base.LikelihoodModelResults.remove_data.__doc__) def remove_data(self): # GLM has alias/reference in result instance diff --git a/statsmodels/othermod/betareg.py b/statsmodels/othermod/betareg.py --- a/statsmodels/othermod/betareg.py +++ b/statsmodels/othermod/betareg.py @@ -134,6 +134,8 @@ def __init__(self, endog, exog, exog_precision=None, self._init_keys.extend(['exog_precision']) self._init_keys.extend(['link', 'link_precision']) self._null_drop_keys = ['exog_precision'] + del kwds['extra_params_names'] + self._check_kwargs(kwds) self.results_class = BetaResults self.results_class_wrapper = BetaResultsWrapper diff --git a/statsmodels/tools/numdiff.py b/statsmodels/tools/numdiff.py --- a/statsmodels/tools/numdiff.py +++ b/statsmodels/tools/numdiff.py @@ -158,7 +158,10 @@ def approx_fprime(x, f, epsilon=None, args=(), kwargs={}, centered=False): f(*((x-ei,)+args), **kwargs))/(2 * epsilon[k]) ei[k] = 0.0 - return grad.squeeze().T + if n == 1: + return grad.T + else: + return grad.squeeze().T def _approx_fprime_scalar(x, f, epsilon=None, args=(), kwargs={},
diff --git a/statsmodels/discrete/tests/test_conditional.py b/statsmodels/discrete/tests/test_conditional.py --- a/statsmodels/discrete/tests/test_conditional.py +++ b/statsmodels/discrete/tests/test_conditional.py @@ -20,12 +20,12 @@ def test_logit_1d(): for x in -1, 0, 1, 2: params = np.r_[x, ] _, grad = model._denom_grad(0, params) - ngrad = approx_fprime(params, lambda x: model._denom(0, x)) + ngrad = approx_fprime(params, lambda x: model._denom(0, x)).squeeze() assert_allclose(grad, ngrad) # Check the gradient for the loglikelihood for x in -1, 0, 1, 2: - grad = approx_fprime(np.r_[x, ], model.loglike) + grad = approx_fprime(np.r_[x, ], model.loglike).squeeze() score = model.score(np.r_[x, ]) assert_allclose(grad, score, rtol=1e-4) @@ -117,7 +117,7 @@ def test_poisson_1d(): # Check the gradient for the loglikelihood for x in -1, 0, 1, 2: - grad = approx_fprime(np.r_[x, ], model.loglike) + grad = approx_fprime(np.r_[x, ], model.loglike).squeeze() score = model.score(np.r_[x, ]) assert_allclose(grad, score, rtol=1e-4) diff --git a/statsmodels/discrete/tests/test_predict.py b/statsmodels/discrete/tests/test_predict.py --- a/statsmodels/discrete/tests/test_predict.py +++ b/statsmodels/discrete/tests/test_predict.py @@ -359,6 +359,7 @@ def test_distr(case): # res = mod.fit() params_dgp = params distr = mod.get_distribution(params_dgp) + assert distr.pmf(1).ndim == 1 try: y2 = distr.rvs(size=(nobs, 1)).squeeze() except ValueError: @@ -378,7 +379,14 @@ def test_distr(case): assert_allclose(res.resid_pearson, (y2 - mean) / np.sqrt(var_), rtol=1e-13) if not issubclass(cls_model, BinaryModel): + # smoke, shape, consistency test + probs = res.predict(which="prob", y_values=np.arange(5)) + assert probs.shape == (len(mod.endog), 5) + probs2 = res.get_prediction( + which="prob", y_values=np.arange(5), average=True) + assert_allclose(probs2.predicted, probs.mean(0), rtol=1e-10) dia = res.get_diagnostic() + dia.probs_predicted # fig = dia.plot_probs(); # 
fig.suptitle(cls_model.__name__ + repr(kwds), fontsize=16) diff --git a/statsmodels/genmod/tests/test_glm.py b/statsmodels/genmod/tests/test_glm.py --- a/statsmodels/genmod/tests/test_glm.py +++ b/statsmodels/genmod/tests/test_glm.py @@ -255,6 +255,15 @@ def test_get_distribution(self): var_ = res1.predict(which="var_unscaled") assert_allclose(var_ * res_scale, var_endog, rtol=1e-13) + # check get_distribution of results instance + if getattr(self, "has_edispersion", False): + with pytest.warns(UserWarning, match="using scale=1"): + distr3 = res1.get_distribution() + else: + distr3 = res1.get_distribution() + for k in distr2.kwds: + assert_allclose(distr3.kwds[k], distr2.kwds[k], rtol=1e-13) + class CheckComparisonMixin: @@ -865,6 +874,7 @@ def setup_class(cls): res2 = Committee() res2.aic_R += 2 # They do not count a degree of freedom for the scale cls.res2 = res2 + cls.has_edispersion = True # FIXME: enable or delete # def setup_method(self): diff --git a/statsmodels/othermod/tests/test_beta.py b/statsmodels/othermod/tests/test_beta.py --- a/statsmodels/othermod/tests/test_beta.py +++ b/statsmodels/othermod/tests/test_beta.py @@ -2,11 +2,16 @@ import io import os +import pytest + import numpy as np from numpy.testing import assert_allclose, assert_equal import pandas as pd import patsy from statsmodels.api import families +from statsmodels.tools.sm_exceptions import ( + ValueWarning, + ) from statsmodels.othermod.betareg import BetaModel from .results import results_betareg as resultsb @@ -126,6 +131,12 @@ def test_precision_formula(self): assert_close(rslt.params, self.meth_fit.params, 1e-10) assert isinstance(rslt.params, pd.Series) + with pytest.warns(ValueWarning, match="unknown kwargs"): + BetaModel.from_formula(self.model, methylation, + exog_precision_formula='~ age', + link_precision=links.Identity(), + junk=False) + def test_scores(self): model, Z = self.model, self.Z for link in (links.Identity(), links.Log()):
BUG: BetaModel, df_xxx warning, and init kwargs Simple example without specifying exog_precision produces a warning `UserWarning: df_model + k_constant + k_extra differs from k_params` Also, I misspelled exog_precision and did not get an exception. Silently ignored. `__init__` does not check whether `kwargs` are valid
"2023-04-07T18:45:06Z"
0.13
[ "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_bse", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_residuals", 
"statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_standard_errors", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_get_prediction", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_R", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_predict", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_R", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_missing", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_offset_exposure", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_loglike", 
"statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_get_prediction", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_r", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_residuals", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_resid", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_tpvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_bic", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_standard_errors", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_mu", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_standard_errors", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_df", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_aic", "statsmodels/genmod/tests/test_glm.py::TestRegularized::test_regularized", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_tpvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_resid", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_bic", 
"statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog_formula", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_endog_dtype", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution_binom_count", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::test_attribute_writable_resettable", "statsmodels/genmod/tests/test_glm.py::test_score_test_ols", "statsmodels/genmod/tests/test_glm.py::test_perfect_pred", "statsmodels/genmod/tests/test_glm.py::test_glm_start_params", "statsmodels/genmod/tests/test_glm.py::test_glm_irls_method", "statsmodels/genmod/tests/test_glm.py::test_plots", "statsmodels/genmod/tests/test_glm.py::test_summary", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls", "statsmodels/genmod/tests/test_glm.py::test_loglike_no_opt", 
"statsmodels/genmod/tests/test_glm.py::test_gradient_irls_eim", "statsmodels/genmod/tests/test_glm.py::test_formula_missing_exposure", "statsmodels/genmod/tests/test_glm.py::test_wtd_patsy_missing", "statsmodels/genmod/tests/test_glm.py::testTweediePowerEstimate", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_poisson_limit", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL", "statsmodels/genmod/tests/test_glm.py::test_glm_lasso_6431", "statsmodels/genmod/tests/test_glm.py::test_tweedie_elastic_net", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_upper_limit", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int8]", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int16]", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int64]", "statsmodels/genmod/tests/test_glm.py::test_glm_bic_warning", "statsmodels/genmod/tests/test_glm.py::test_glm_bic", "statsmodels/genmod/tests/test_glm.py::test_qaic", "statsmodels/genmod/tests/test_glm.py::test_non_invertible_hessian_fails_summary", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int32]", "statsmodels/genmod/tests/test_glm.py::test_int_scale", "statsmodels/genmod/tests/test_glm.py::test_output_exposure_null", "statsmodels/genmod/tests/test_glm.py::test_poisson_deviance", "statsmodels/genmod/tests/test_glm.py::test_tweedie_score", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_loglike", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_init_kwargs", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_degrees", 
"statsmodels/genmod/tests/test_glm.py::TestStartParams::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_scale", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_bic", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_params", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_pearson_chi2", 
"statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_scale", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_missing", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_deviance", "statsmodels/discrete/tests/test_conditional.py::test_poisson_1d", "statsmodels/discrete/tests/test_conditional.py::test_lasso_poisson", "statsmodels/discrete/tests/test_conditional.py::test_formula", "statsmodels/discrete/tests/test_conditional.py::test_conditional_mnlogit_3d", "statsmodels/discrete/tests/test_conditional.py::test_conditional_mnlogit_2d", "statsmodels/discrete/tests/test_conditional.py::test_poisson_2d", "statsmodels/discrete/tests/test_conditional.py::test_conditional_mnlogit_grad", "statsmodels/discrete/tests/test_conditional.py::test_lasso_logistic", "statsmodels/discrete/tests/test_conditional.py::test_logit_1d", "statsmodels/discrete/tests/test_conditional.py::test_logit_2d", 
"statsmodels/othermod/tests/test_beta.py::TestBetaModel::test_income_coefficients", "statsmodels/othermod/tests/test_beta.py::TestBetaModel::test_scores", "statsmodels/othermod/tests/test_beta.py::TestBetaModel::test_methylation_precision", "statsmodels/othermod/tests/test_beta.py::TestBetaModel::test_income_precision", "statsmodels/othermod/tests/test_beta.py::TestBetaModel::test_results_other", "statsmodels/othermod/tests/test_beta.py::TestBetaModel::test_methylation_coefficients", "statsmodels/othermod/tests/test_beta.py::TestBetaMeth::test_basic", "statsmodels/othermod/tests/test_beta.py::TestBetaMeth::test_predict_distribution", "statsmodels/othermod/tests/test_beta.py::TestBetaMeth::test_resid", "statsmodels/othermod/tests/test_beta.py::TestBetaMeth::test_oim", "statsmodels/othermod/tests/test_beta.py::TestBetaIncome::test_influence", "statsmodels/othermod/tests/test_beta.py::TestBetaIncome::test_score_test", "statsmodels/discrete/tests/test_predict.py::TestGeneralizedPoissonPredict::test_predict_linear", "statsmodels/discrete/tests/test_predict.py::TestGeneralizedPoissonPredict::test_influence", "statsmodels/discrete/tests/test_predict.py::TestGeneralizedPoissonPredict::test_score_test_alpha", "statsmodels/discrete/tests/test_predict.py::TestGeneralizedPoissonPredict::test_score_test", "statsmodels/discrete/tests/test_predict.py::TestZINegativeBinomialPPredict::test_basic", "statsmodels/discrete/tests/test_predict.py::TestZINegativeBinomialPPredict::test_diagnostic", "statsmodels/discrete/tests/test_predict.py::TestZINegativeBinomialPPredict::test_predict", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_score_test", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_influence", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_basic", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_predict", 
"statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_diagnostic", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_predict_linear", "statsmodels/discrete/tests/test_predict.py::TestNegativeBinomialPPredict::test_score_test_alpha" ]
[ "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_get_distribution", "statsmodels/othermod/tests/test_beta.py::TestBetaModel::test_precision_formula", "statsmodels/discrete/tests/test_predict.py::test_distr[case1]", "statsmodels/discrete/tests/test_predict.py::test_distr[case6]", "statsmodels/discrete/tests/test_predict.py::test_distr[case4]", "statsmodels/discrete/tests/test_predict.py::test_distr[case2]", "statsmodels/discrete/tests/test_predict.py::test_distr[case11]", "statsmodels/discrete/tests/test_predict.py::test_distr[case10]", "statsmodels/discrete/tests/test_predict.py::test_distr[case9]", "statsmodels/discrete/tests/test_predict.py::test_distr[case13]", "statsmodels/discrete/tests/test_predict.py::test_distr[case7]", "statsmodels/discrete/tests/test_predict.py::test_distr[case8]", 
"statsmodels/discrete/tests/test_predict.py::test_distr[case12]", "statsmodels/discrete/tests/test_predict.py::test_distr[case0]", "statsmodels/discrete/tests/test_predict.py::test_distr[case14]", "statsmodels/discrete/tests/test_predict.py::test_distr[case5]", "statsmodels/discrete/tests/test_predict.py::test_distr[case3]" ]
Python
[]
[]
statsmodels/statsmodels
8,801
statsmodels__statsmodels-8801
[ "8696" ]
c967ca06d96b3314891b81b6839ebad5730a540f
diff --git a/statsmodels/robust/norms.py b/statsmodels/robust/norms.py --- a/statsmodels/robust/norms.py +++ b/statsmodels/robust/norms.py @@ -3,6 +3,16 @@ # TODO: add plots to weighting functions for online docs. +def _cabs(x): + """absolute value function that changes complex sign based on real sign + + This could be useful for complex step derivatives of functions that + need abs. Not yet used. + """ + sign = (x.real >= 0) * 2 - 1 + return sign * x + + class RobustNorm: """ The parent class for the norms used for robust regression. @@ -245,11 +255,17 @@ def weights(self, z): weights(z) = t/\|z\| for \|z\| > t """ - z = np.asarray(z) + z_isscalar = np.isscalar(z) + z = np.atleast_1d(z) + test = self._subset(z) absz = np.abs(z) absz[test] = 1.0 - return test + (1 - test) * self.t / absz + v = test + (1 - test) * self.t / absz + + if z_isscalar: + v = v[0] + return v def psi_deriv(self, z): """ @@ -259,7 +275,7 @@ def psi_deriv(self, z): ----- Used to estimate the robust covariance matrix. """ - return np.less_equal(np.abs(z), self.t) + return np.less_equal(np.abs(z), self.t).astype(float) # TODO: untested, but looks right. RamsayE not available in R or SAS? 
@@ -390,16 +406,16 @@ def rho(self, z): Returns ------- rho : ndarray - rho(z) = a*(1-cos(z/a)) for \|z\| <= a*pi + rho(z) = a**2 *(1-cos(z/a)) for \|z\| <= a*pi - rho(z) = 2*a for \|z\| > a*pi + rho(z) = 2*a for \|z\| > a*pi """ a = self.a z = np.asarray(z) test = self._subset(z) - return (test * a * (1 - np.cos(z / a)) + - (1 - test) * 2 * a) + return (test * a**2 * (1 - np.cos(z / a)) + + (1 - test) * a**2 * 2) def psi(self, z): r""" @@ -415,7 +431,7 @@ def psi(self, z): Returns ------- psi : ndarray - psi(z) = sin(z/a) for \|z\| <= a*pi + psi(z) = a * sin(z/a) for \|z\| <= a*pi psi(z) = 0 for \|z\| > a*pi """ @@ -423,7 +439,7 @@ def psi(self, z): a = self.a z = np.asarray(z) test = self._subset(z) - return test * np.sin(z / a) + return test * a * np.sin(z / a) def weights(self, z): r""" @@ -439,9 +455,9 @@ def weights(self, z): Returns ------- weights : ndarray - weights(z) = sin(z/a)/(z/a) for \|z\| <= a*pi + weights(z) = sin(z/a) / (z/a) for \|z\| <= a*pi - weights(z) = 0 for \|z\| > a*pi + weights(z) = 0 for \|z\| > a*pi """ a = self.a z = np.asarray(z) @@ -467,7 +483,7 @@ def psi_deriv(self, z): """ test = self._subset(z) - return test*np.cos(z / self.a)/self.a + return test * np.cos(z / self.a) # TODO: this is untested @@ -511,12 +527,12 @@ def rho(self, z): rho : ndarray rho(z) = (1/2.)*z**2 for \|z\| <= c - rho(z) = 0 for \|z\| > c + rho(z) = (1/2.)*c**2 for \|z\| > c """ z = np.asarray(z) test = self._subset(z) - return test * z**2 * 0.5 + return test * z**2 * 0.5 + (1 - test) * self.c**2 * 0.5 def psi(self, z): r""" @@ -619,24 +635,33 @@ def rho(self, z): Returns ------- rho : ndarray - rho(z) = (1/2.)*z**2 for \|z\| <= a + rho(z) = z**2 / 2 for \|z\| <= a - rho(z) = a*\|z\| - 1/2.*a**2 for a < \|z\| <= b + rho(z) = a*\|z\| - 1/2.*a**2 for a < \|z\| <= b - rho(z) = a*(c*\|z\|-(1/2.)*z**2)/(c-b) for b < \|z\| <= c + rho(z) = a*(c - \|z\|)**2 / (c - b) / 2 for b < \|z\| <= c - rho(z) = a*(b + c - a) for \|z\| > c + rho(z) = a*(b + c - a) / 2 for \|z\| > 
c """ + a, b, c = self.a, self.b, self.c + + z_isscalar = np.isscalar(z) + z = np.atleast_1d(z) - z = np.abs(z) - a = self.a - b = self.b - c = self.c t1, t2, t3 = self._subset(z) - v = (t1 * z**2 * 0.5 + - t2 * (a * z - a**2 * 0.5) + - t3 * (a * (c * z - z**2 * 0.5) / (c - b) - 7 * a**2 / 6.) + - (1 - t1 + t2 + t3) * a * (b + c - a)) + t34 = ~(t1 | t2) + dt = np.promote_types(z.dtype, "float") + v = np.zeros(z.shape, dtype=dt) + z = np.abs(z) + v[t1] = z[t1]**2 * 0.5 + # v[t2] = (a * (z[t2] - a) + a**2 * 0.5) + v[t2] = (a * z[t2] - a**2 * 0.5) + v[t3] = a * (c - z[t3])**2 / (c - b) * (-0.5) + v[t34] += a * (b + c - a) * 0.5 + + if z_isscalar: + v = v[0] + return v def psi(self, z): @@ -661,16 +686,23 @@ def psi(self, z): psi(z) = 0 for \|z\| > c """ - z = np.asarray(z) - a = self.a - b = self.b - c = self.c + a, b, c = self.a, self.b, self.c + + z_isscalar = np.isscalar(z) + z = np.atleast_1d(z) + t1, t2, t3 = self._subset(z) + dt = np.promote_types(z.dtype, "float") + v = np.zeros(z.shape, dtype=dt) s = np.sign(z) - z = np.abs(z) - v = s * (t1 * z + - t2 * a*s + - t3 * a*s * (c - z) / (c - b)) + za = np.abs(z) + + v[t1] = z[t1] + v[t2] = a * s[t2] + v[t3] = a * s[t3] * (c - za[t3]) / (c - b) + + if z_isscalar: + v = v[0] return v def weights(self, z): @@ -687,37 +719,51 @@ def weights(self, z): Returns ------- weights : ndarray - weights(z) = 1 for \|z\| <= a + weights(z) = 1 for \|z\| <= a - weights(z) = a/\|z\| for a < \|z\| <= b + weights(z) = a/\|z\| for a < \|z\| <= b weights(z) = a*(c - \|z\|)/(\|z\|*(c-b)) for b < \|z\| <= c - weights(z) = 0 for \|z\| > c + weights(z) = 0 for \|z\| > c """ - z = np.asarray(z) - a = self.a - b = self.b - c = self.c + a, b, c = self.a, self.b, self.c + + z_isscalar = np.isscalar(z) + z = np.atleast_1d(z) + t1, t2, t3 = self._subset(z) - v = np.zeros_like(z) + dt = np.promote_types(z.dtype, "float") + v = np.zeros(z.shape, dtype=dt) v[t1] = 1.0 abs_z = np.abs(z) v[t2] = a / abs_z[t2] abs_zt3 = abs_z[t3] v[t3] = a * (c - 
abs_zt3) / (abs_zt3 * (c - b)) - v[np.where(np.isnan(v))] = 1. # TODO: for some reason 0 returns a nan? + + if z_isscalar: + v = v[0] return v def psi_deriv(self, z): - t1, _, t3 = self._subset(z) + """Derivative of psi function, second derivative of rho function. + """ a, b, c = self.a, self.b, self.c - # default is t1 - d = np.zeros_like(z) + + z_isscalar = np.isscalar(z) + z = np.atleast_1d(z) + + t1, _, t3 = self._subset(z) + + dt = np.promote_types(z.dtype, "float") + d = np.zeros(z.shape, dtype=dt) d[t1] = 1.0 zt3 = z[t3] - d[t3] = (a * np.sign(zt3) * zt3) / (np.abs(zt3) * (c - b)) + d[t3] = -(a * np.sign(zt3) * zt3) / (np.abs(zt3) * (c - b)) + + if z_isscalar: + d = d[0] return d @@ -764,7 +810,8 @@ def rho(self, z): rho(z) = 0 for \|z\| > R """ subset = self._subset(z) - return -(1 - (z / self.c)**2)**3 * subset * self.c**2 / 6. + factor = self.c**2 / 6. + return -(1 - (z / self.c)**2)**3 * subset * factor + factor def psi(self, z): r"""
diff --git a/statsmodels/robust/tests/results/results_norms.py b/statsmodels/robust/tests/results/results_norms.py new file mode 100644 --- /dev/null +++ b/statsmodels/robust/tests/results/results_norms.py @@ -0,0 +1,33 @@ + + +from statsmodels.tools.testing import Holder + +res_hampel = Holder( + rho=[7.5, 6.833333333333333, 1.875, 0.5, 0.0, 0.5, 1.875, + 6.833333333333333, 7.5], + psi=[0.0, -0.6666666666666666, -1.5, -1.0, 0.0, 1.0, 1.5, + 0.6666666666666666, 0.0], + psi_deriv=[0.0, -0.3333333333333333, 0.0, 1.0, 1.0, 1.0, 0.0, + -0.3333333333333333, 0.0], + weights=[0.0, 0.1111111111111111, 0.75, 1.0, 1.0, 1.0, 0.75, + 0.1111111111111111, 0.0], + ) + +res_biweight = Holder( + rho=[2.6666666666666665, 2.6666666666666665, 1.5416666666666665, + 0.46940104166666663, 0.0, 0.46940104166666663, 1.5416666666666665, + 2.6666666666666665, 2.6666666666666665], + psi=[0.0, 0.0, -1.125, -0.87890625, 0.0, 0.87890625, 1.125, 0.0, 0.0], + psi_deriv=[0.0, 0.0, -0.1875, 0.64453125, 1.0, 0.64453125, -0.1875, 0.0, + 0.0], + weights=[0.0, 0.0, 0.5625, 0.87890625, 1.0, 0.87890625, 0.5625, 0.0, 0.0], + ) + +res_huber = Holder( + rho=[11.200487500000001, 7.165487499999999, 1.7854875000000001, 0.5, 0.0, + 0.5, 1.7854875000000001, 7.165487499999999, 11.200487500000001], + psi=[-1.345, -1.345, -1.345, -1.0, 0.0, 1.0, 1.345, 1.345, 1.345], + psi_deriv=[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0], + weights=[0.14944444444444444, 0.22416666666666665, 0.6725, 1.0, 1.0, 1.0, + 0.6725, 0.22416666666666665, 0.14944444444444444], + ) diff --git a/statsmodels/robust/tests/test_norms.py b/statsmodels/robust/tests/test_norms.py new file mode 100644 --- /dev/null +++ b/statsmodels/robust/tests/test_norms.py @@ -0,0 +1,96 @@ + +import pytest +import numpy as np +from numpy.testing import assert_allclose + +from statsmodels.robust import norms +from statsmodels.tools.numdiff import ( + _approx_fprime_scalar, + # _approx_fprime_cs_scalar, # not yet + ) +from .results import results_norms as res_r + 
+cases = [ + (norms.Hampel, (1.5, 3.5, 8.), res_r.res_hampel), + (norms.TukeyBiweight, (4,), res_r.res_biweight), + (norms.HuberT, (1.345,), res_r.res_huber), + ] + +norms_other = [ + (norms.LeastSquares, ()), + (norms.TrimmedMean, (1.9,)), # avoid arg at integer used in example + (norms.AndrewWave, ()), + (norms.RamsayE, ()), + # norms.MQuantileNorm, # requires keywords in init + ] + +dtypes = ["int", np.float64, np.complex128] + + [email protected]("dtype", dtypes) [email protected]("case", cases) +def test_norm(case, dtype): + ncls, args, res = case + if ncls in [norms.HuberT] and dtype == np.complex128: + # skip for now + return + + norm = ncls(*args) + x = np.array([-9, -6, -2, -1, 0, 1, 2, 6, 9], dtype=dtype) + + weights = norm.weights(x) + rho = norm.rho(x) + psi = norm.psi(x) + psi_deriv = norm.psi_deriv(x) + assert_allclose(weights, res.weights, rtol=1e-12, atol=1e-20) + assert_allclose(rho, res.rho, rtol=1e-12, atol=1e-20) + assert_allclose(psi, res.psi, rtol=1e-12, atol=1e-20) + assert_allclose(psi_deriv, res.psi_deriv, rtol=1e-12, atol=1e-20) + + dtype2 = np.promote_types(dtype, "float") + assert weights.dtype == dtype2 + assert rho.dtype == dtype2 + assert psi.dtype == dtype2 + assert psi_deriv.dtype == dtype2 + + psid = _approx_fprime_scalar(x, norm.rho) + assert_allclose(psid, res.psi, rtol=1e-6, atol=1e-8) + psidd = _approx_fprime_scalar(x, norm.psi) + assert_allclose(psidd, res.psi_deriv, rtol=1e-6, atol=1e-8) + + # complex step derivatives are not yet supported if method uses np.abs + # psid = _approx_fprime_cs_scalar(x, norm.rho) + # assert_allclose(psid, res.psi, rtol=1e-12, atol=1e-20) + # psidd = _approx_fprime_cs_scalar(x, norm.psi) + # assert_allclose(psidd, res.psi_deriv, rtol=1e-12, atol=1e-20) + + # check scalar value + methods = ["weights", "rho", "psi", "psi_deriv"] + for meth in methods: + resm = [getattr(norm, meth)(xi) for xi in x] + assert_allclose(resm, getattr(res, meth)) + + [email protected]("case", norms_other) +def 
test_norms_consistent(case): + # test that norm methods are consistent with each other + ncls, args = case + norm = ncls(*args) + x = np.array([-9, -6, -2, -1, 0, 1, 2, 6, 9], dtype=float) + + weights = norm.weights(x) + rho = norm.rho(x) # not used + psi = norm.psi(x) + psi_deriv = norm.psi_deriv(x) + + # check location and u-shape of rho + assert rho[4] == 0 + assert np.all(np.diff(rho[4:]) >= 0) + assert np.all(np.diff(rho[:4]) <= 0) + + # avoid zero division nan: + assert_allclose(weights, (psi + 1e-50) / (x + 1e-50), rtol=1e-6, atol=1e-8) + psid = _approx_fprime_scalar(x, norm.rho) + assert_allclose(psi, psid, rtol=1e-6, atol=1e-8) + psidd = _approx_fprime_scalar(x, norm.psi) + assert_allclose(psi_deriv, psidd, rtol=1e-6, atol=1e-8)
BUG: incorrect definition of piecewise boolean indicator array in `Hampel.rho` (*robust* module) #### Describe the bug There is a bug in `class Hampel` within [statsmodels/robust/norms.py](https://github.com/statsmodels/statsmodels/blob/main/statsmodels/robust/norms.py) in defining piecewise function over `z` in `Hampel.rho` method. It results in incorrect values for the `Hampel.rho` method outcomes. The `rho` method uses `_subset` method to calculate boolean indicator arrays for `t1`, `t2`, and `t3` pieces, with the last piece `t4` defining the whole range needed for the Hampel's $\rho$ function being calculated in the `rho` method. This last piece is incorrect in the calculation of the boolean indicator array, I show in the code below. Note that I can't reproduce the issue perfectly, since `Hampel.rho` does not rely on `Hampel._subset` entirely in defining piecewise boolean indicator arrays, as opposed to `Hampel.psi` or `Hampel.weights`. #### Code Sample, a copy-pastable example if possible ```python import statsmodels.api as sm # create hampel norm with default parameters a, b, c hampel = sm.robust.norms.Hampel(a=2., b=4., c=8.) # create specially crafted t1, t2, t3 t1, t2, t3 = hampel._subset([1, 3, 5, 9]) # calculate t4 as in `Hampel.rho` (line 639 calculates that implicitly, without assigning variable `t4`) t4 = (1 - t1 + t2 + t3) # t4 gives `array([0, 2, 2, 1])` ``` <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `main`. If your problem has been fixed in an unreleased version, you might be able to use `main` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the main branch of this repository? 
It helps the limited resources if we know problems exist in the current main branch so that they do not need to check whether the code sample produces a bug in the next release. </details> If the issue has not been resolved, please file it in the issue tracker. #### Expected Output The implicit `t4` that is calculated "on the fly" in `Hampel.rho` should result in an array `array([0, 0, 0, 1])` to pick only the last piecewise boolean indicator array filter. In fact, it is obvious the "on the fly" calculation in `Hampel.rho` line 639 should be ```python (1 - (t1 + t2 + t3)) * a * (b + c - a)) ``` rather than the existing ```python (1 - t1 + t2 + t3) * a * (b + c - a)) ``` #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> INSTALLED VERSIONS ------------------ Python: 3.8.10.final.0 OS: Linux 5.10.102.1-microsoft-standard-WSL2 #1 SMP Wed Mar 2 00:30:59 UTC 2022 x86_64 byteorder: little LC_ALL: None LANG: C.UTF-8 statsmodels =========== Installed: 0.13.5 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.22.3 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/numpy) scipy: 1.8.0 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/scipy) pandas: 1.4.1 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/pandas) dateutil: 2.8.2 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/dateutil) patsy: 0.5.3 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.5.1 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/matplotlib) backend: module://matplotlib_inline.backend_inline cvxopt: Not installed joblib: 1.1.0 
(/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/joblib) Developer Tools ================ IPython: 8.1.1 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/IPython) jinja2: 3.0.3 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/jinja2) sphinx: Not installed pygments: 2.11.2 (/home/kamil/.cache/pypoetry/virtualenvs/smps-aJ6sAQBH-py3.8/lib/python3.8/site-packages/pygments) pytest: Not installed virtualenv: Not installed </details>
Note that this affects the same line of code as in #8697 but a different problem is raised in this BUG issue.
"2023-04-15T14:00:11Z"
0.14
[ "statsmodels/robust/tests/test_norms.py::test_norms_consistent[case0]", "statsmodels/robust/tests/test_norms.py::test_norm[case2-complex128]", "statsmodels/robust/tests/test_norms.py::test_norms_consistent[case3]" ]
[ "statsmodels/robust/tests/test_norms.py::test_norm[case2-int]", "statsmodels/robust/tests/test_norms.py::test_norm[case0-int]", "statsmodels/robust/tests/test_norms.py::test_norm[case1-int]", "statsmodels/robust/tests/test_norms.py::test_norms_consistent[case1]", "statsmodels/robust/tests/test_norms.py::test_norm[case0-float64]", "statsmodels/robust/tests/test_norms.py::test_norm[case0-complex128]", "statsmodels/robust/tests/test_norms.py::test_norm[case2-float64]", "statsmodels/robust/tests/test_norms.py::test_norm[case1-complex128]", "statsmodels/robust/tests/test_norms.py::test_norms_consistent[case2]", "statsmodels/robust/tests/test_norms.py::test_norm[case1-float64]" ]
Python
[]
[]
statsmodels/statsmodels
8,828
statsmodels__statsmodels-8828
[ "8438" ]
3e739c26729e2c844394f5918846e2897ad0f871
diff --git a/statsmodels/tsa/forecasting/theta.py b/statsmodels/tsa/forecasting/theta.py --- a/statsmodels/tsa/forecasting/theta.py +++ b/statsmodels/tsa/forecasting/theta.py @@ -151,6 +151,8 @@ def __init__( "model", options=("auto", "additive", "multiplicative", "mul", "add"), ) + if self._method == "auto": + self._method = "mul" if self._y.min() > 0 else "add" if self._period is None and self._deseasonalize: idx = getattr(endog, "index", None) pfreq = None @@ -183,9 +185,6 @@ def _deseasonalize_data(self) -> Tuple[np.ndarray, np.ndarray]: y = self._y if not self._has_seasonality: return self._y, np.empty(0) - self._method = ( - "mul" if self._method == "auto" and self._y.min() > 0 else "add" - ) res = seasonal_decompose(y, model=self._method, period=self._period) if res.seasonal.min() <= 0:
diff --git a/statsmodels/tsa/forecasting/tests/test_theta.py b/statsmodels/tsa/forecasting/tests/test_theta.py --- a/statsmodels/tsa/forecasting/tests/test_theta.py +++ b/statsmodels/tsa/forecasting/tests/test_theta.py @@ -133,3 +133,29 @@ def test_forecast_seasonal_alignment(data, period): index = np.arange(data.shape[0], data.shape[0] + comp.shape[0]) expected = seasonal[index % period] np.testing.assert_allclose(comp.seasonal, expected) + + +def test_auto(reset_randomstate): + m = 250 + e = np.random.standard_normal(m) + s = 10 * np.sin(np.linspace(0, np.pi, 12)) + s = np.tile(s, (m // 12 + 1))[:m] + idx = pd.period_range("2000-01-01", freq="M", periods=m) + x = e + s + y = pd.DataFrame(10 + x - x.min(), index=idx) + + tm = ThetaModel(y, method="auto") + assert tm.method == "mul" + res = tm.fit() + + tm = ThetaModel(y, method="mul") + assert tm.method == "mul" + res2 = tm.fit() + + np.testing.assert_allclose(res.params, res2.params) + + tm = ThetaModel(y - y.mean(), method="auto") + assert tm.method == "add" + res3 = tm.fit() + + assert not np.allclose(res.params, res3.params)
Theta deseasonalizing multiplicative method not working #### Describe the bug When using the forecasting Theta model I found some strange behavior in the deseasonalizing of the data. There should be three deseasonalize methods: β€œauto”, β€œadditive” and β€œmultiplicative” (https://www.statsmodels.org/dev/generated/statsmodels.tsa.forecasting.theta.ThetaModel.html#statsmodels.tsa.forecasting.theta.ThetaModel). However, when I use the "auto" method, the output is not equal to the ouput of the β€œadditive” or β€œmultiplicative” method. I had a look in the source code and suspect that this is caused by the _deseasonalize_data() function in the Theta model. In this function the following code is used: **self._method = ( "mul" if self._method == "auto" and self._y.min() > 0 else "add" )** The "multiplicative” method is not considered here and is returned as "additive" if I'm not mistaking. This should not be the case if you can give it as input. <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `main`. If your problem has been fixed in an unreleased version, you might be able to use `main` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the main branch of this repository? It helps the limited resources if we know problems exist in the current main branch so that they do not need to check whether the code sample produces a bug in the next release. </details> #### Expected Output Solve the issue. When for deseasonalizing the "multiplicative" method is selected, it should also be used. 
#### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> [paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line] INSTALLED VERSIONS ------------------ Python: 3.9.12.final.0 statsmodels =========== Installed: 0.13.0 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\statsmodels) Required Dependencies ===================== cython: 0.29.24 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\Cython) numpy: 1.21.4 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\numpy) scipy: 1.5.4 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\scipy) pandas: 1.3.4 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\pandas) dateutil: 2.8.2 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\dateutil) patsy: 0.5.2 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\patsy) Optional Dependencies ===================== matplotlib: 3.5.2 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\matplotlib) Backend TkAgg is interactive backend. Turning interactive mode on. backend: TkAgg cvxopt: Not installed joblib: 1.1.0 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\joblib) Developer Tools ================ IPython: 8.4.0 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\IPython) jinja2: 3.1.2 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\jinja2) sphinx: 4.5.0 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\sphinx) pygments: 2.12.0 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\pygments) pytest: 6.2.5 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\pytest) virtualenv: 20.16.2 (C:\Users\CasRoelants\anaconda3\envs\honeycomb-env\lib\site-packages\virtualenv)
Any updates?
"2023-04-24T08:51:18Z"
0.14
[ "statsmodels/tsa/forecasting/tests/test_theta.py::test_smoke[type:", "statsmodels/tsa/forecasting/tests/test_theta.py::test_forecast_seasonal_alignment[type:", "statsmodels/tsa/forecasting/tests/test_theta.py::test_forecast_errors[type:", "statsmodels/tsa/forecasting/tests/test_theta.py::test_alt_index[datetime]", "statsmodels/tsa/forecasting/tests/test_theta.py::test_alt_index[range]", "statsmodels/tsa/forecasting/tests/test_theta.py::test_alt_index[nofreq]", "statsmodels/tsa/forecasting/tests/test_theta.py::test_pi_width", "statsmodels/tsa/forecasting/tests/test_theta.py::test_alt_index[period]", "statsmodels/tsa/forecasting/tests/test_theta.py::test_no_freq" ]
[ "statsmodels/tsa/forecasting/tests/test_theta.py::test_auto" ]
Python
[]
[]
statsmodels/statsmodels
8,831
statsmodels__statsmodels-8831
[ "8634" ]
3e739c26729e2c844394f5918846e2897ad0f871
diff --git a/statsmodels/tsa/holtwinters/_smoothers.py b/statsmodels/tsa/holtwinters/_smoothers.py --- a/statsmodels/tsa/holtwinters/_smoothers.py +++ b/statsmodels/tsa/holtwinters/_smoothers.py @@ -192,7 +192,7 @@ def holt_mul_dam(x, hw_args: HoltWintersArgs): for i in range(1, hw_args.n): lvl[i] = (y_alpha[i - 1]) + (alphac * (lvl[i - 1] * b[i - 1] ** phi)) b[i] = (beta * (lvl[i] / lvl[i - 1])) + (betac * b[i - 1] ** phi) - return hw_args.y - lvl * b ** phi + return hw_args.y - lvl * b**phi def holt_add_dam(x, hw_args: HoltWintersArgs): @@ -337,7 +337,7 @@ def holt_win_mul_mul_dam(x, hw_args: HoltWintersArgs): s[i + m - 1] = (y_gamma[i - 1] / (lvl[i - 1] * b[i - 1] ** phi)) + ( gammac * s[i - 1] ) - return hw_args.y - (lvl * b ** phi) * s[: -(m - 1)] + return hw_args.y - (lvl * b**phi) * s[: -(m - 1)] def holt_win_add_add_dam(x, hw_args: HoltWintersArgs): diff --git a/statsmodels/tsa/holtwinters/model.py b/statsmodels/tsa/holtwinters/model.py --- a/statsmodels/tsa/holtwinters/model.py +++ b/statsmodels/tsa/holtwinters/model.py @@ -287,8 +287,8 @@ def __init__( ) estimated = self._initialization_method == "estimated" self._estimate_level = estimated - self._estimate_trend = estimated and self.trend - self._estimate_seasonal = estimated and self.seasonal + self._estimate_trend = estimated and self.trend is not None + self._estimate_seasonal = estimated and self.seasonal is not None self._bounds = self._check_bounds(bounds) self._use_boxcox = use_boxcox self._lambda = np.nan @@ -765,8 +765,6 @@ def _optimize_parameters( beta = data.beta phi = data.phi gamma = data.gamma - initial_level = data.level - initial_trend = data.trend y = data.y start_params = data.params @@ -796,11 +794,11 @@ def _optimize_parameters( alpha is None, has_trend and beta is None, has_seasonal and gamma is None, - initial_level is None, - has_trend and initial_trend is None, + self._estimate_level, + self._estimate_trend, damped_trend and phi is None, ] - + [has_seasonal] * m, + + 
[has_seasonal and self._estimate_seasonal] * m, ) ( sel, diff --git a/statsmodels/tsa/holtwinters/results.py b/statsmodels/tsa/holtwinters/results.py --- a/statsmodels/tsa/holtwinters/results.py +++ b/statsmodels/tsa/holtwinters/results.py @@ -668,7 +668,7 @@ def simulate( resid = self.model._y - fitted else: resid = (self.model._y - fitted) / fitted - sigma = np.sqrt(np.sum(resid ** 2) / (len(resid) - n_params)) + sigma = np.sqrt(np.sum(resid**2) / (len(resid) - n_params)) # get random error eps if isinstance(random_errors, np.ndarray):
diff --git a/statsmodels/tsa/holtwinters/tests/test_holtwinters.py b/statsmodels/tsa/holtwinters/tests/test_holtwinters.py --- a/statsmodels/tsa/holtwinters/tests/test_holtwinters.py +++ b/statsmodels/tsa/holtwinters/tests/test_holtwinters.py @@ -43,6 +43,62 @@ SEASONALS = ("add", "mul", None) TRENDS = ("add", "mul", None) +# aust = pd.read_json(aust_json, typ='Series').sort_index() +data = [ + 41.727457999999999, + 24.04185, + 32.328102999999999, + 37.328707999999999, + 46.213152999999998, + 29.346326000000001, + 36.482909999999997, + 42.977719, + 48.901524999999999, + 31.180221, + 37.717880999999998, + 40.420211000000002, + 51.206862999999998, + 31.887228, + 40.978262999999998, + 43.772491000000002, + 55.558566999999996, + 33.850915000000001, + 42.076383, + 45.642291999999998, + 59.766779999999997, + 35.191876999999998, + 44.319737000000003, + 47.913736, +] +index = [ + "2005-03-01 00:00:00", + "2005-06-01 00:00:00", + "2005-09-01 00:00:00", + "2005-12-01 00:00:00", + "2006-03-01 00:00:00", + "2006-06-01 00:00:00", + "2006-09-01 00:00:00", + "2006-12-01 00:00:00", + "2007-03-01 00:00:00", + "2007-06-01 00:00:00", + "2007-09-01 00:00:00", + "2007-12-01 00:00:00", + "2008-03-01 00:00:00", + "2008-06-01 00:00:00", + "2008-09-01 00:00:00", + "2008-12-01 00:00:00", + "2009-03-01 00:00:00", + "2009-06-01 00:00:00", + "2009-09-01 00:00:00", + "2009-12-01 00:00:00", + "2010-03-01 00:00:00", + "2010-06-01 00:00:00", + "2010-09-01 00:00:00", + "2010-12-01 00:00:00", +] +idx = pd.to_datetime(index) +aust = pd.Series(data, index=pd.DatetimeIndex(idx, freq=pd.infer_freq(idx))) + @pytest.fixture(scope="module") def ses(): @@ -240,63 +296,6 @@ def setup_class(cls): ) cls.livestock2_livestock = livestock2_livestock - # aust = pd.read_json(aust_json, typ='Series').sort_index() - data = [ - 41.727457999999999, - 24.04185, - 32.328102999999999, - 37.328707999999999, - 46.213152999999998, - 29.346326000000001, - 36.482909999999997, - 42.977719, - 48.901524999999999, - 31.180221, - 
37.717880999999998, - 40.420211000000002, - 51.206862999999998, - 31.887228, - 40.978262999999998, - 43.772491000000002, - 55.558566999999996, - 33.850915000000001, - 42.076383, - 45.642291999999998, - 59.766779999999997, - 35.191876999999998, - 44.319737000000003, - 47.913736, - ] - index = [ - "2005-03-01 00:00:00", - "2005-06-01 00:00:00", - "2005-09-01 00:00:00", - "2005-12-01 00:00:00", - "2006-03-01 00:00:00", - "2006-06-01 00:00:00", - "2006-09-01 00:00:00", - "2006-12-01 00:00:00", - "2007-03-01 00:00:00", - "2007-06-01 00:00:00", - "2007-09-01 00:00:00", - "2007-12-01 00:00:00", - "2008-03-01 00:00:00", - "2008-06-01 00:00:00", - "2008-09-01 00:00:00", - "2008-12-01 00:00:00", - "2009-03-01 00:00:00", - "2009-06-01 00:00:00", - "2009-09-01 00:00:00", - "2009-12-01 00:00:00", - "2010-03-01 00:00:00", - "2010-06-01 00:00:00", - "2010-09-01 00:00:00", - "2010-12-01 00:00:00", - ] - aust = pd.Series(data, index) - aust.index = pd.DatetimeIndex( - aust.index, freq=pd.infer_freq(aust.index) - ) cls.aust = aust cls.start_params = [ 1.5520372162082909e-09, @@ -519,7 +518,11 @@ def test_holt_damp_r(self): # livestock2_livestock <- c(...) 
# res <- ets(livestock2_livestock, model='AAN', damped_trend=TRUE, # phi=0.98) - mod = Holt(self.livestock2_livestock, damped_trend=True) + mod = Holt( + self.livestock2_livestock, + damped_trend=True, + initialization_method="estimated", + ) params = { "smoothing_level": 0.97402626, "smoothing_trend": 0.00010006, @@ -1646,11 +1649,11 @@ def test_error_boxcox(): with pytest.raises(TypeError, match="use_boxcox must be True"): ExponentialSmoothing(y, use_boxcox="a", initialization_method="known") - mod = ExponentialSmoothing(y ** 2, use_boxcox=True) + mod = ExponentialSmoothing(y**2, use_boxcox=True) assert isinstance(mod, ExponentialSmoothing) mod = ExponentialSmoothing( - y ** 2, use_boxcox=True, initialization_method="legacy-heuristic" + y**2, use_boxcox=True, initialization_method="legacy-heuristic" ) with pytest.raises(ValueError, match="use_boxcox was set"): mod.fit(use_boxcox=False) @@ -1950,7 +1953,7 @@ def test_attributes(ses): def test_summary_boxcox(ses): mod = ExponentialSmoothing( - ses ** 2, use_boxcox=True, initialization_method="heuristic" + ses**2, use_boxcox=True, initialization_method="heuristic" ) with pytest.raises(ValueError, match="use_boxcox was set at model"): mod.fit(use_boxcox=True) @@ -2111,3 +2114,40 @@ def test_invalid_index(reset_randomstate): fitted = model.fit(optimized=True, use_brute=True) with pytest.warns(ValueWarning, match="No supported"): fitted.forecast(steps=157200) + + +def test_initial_level(): + # GH 8634 + series = [0.0, 0.0, 0.0, 100.0, 0.0, 0.0, 0.0] + es = ExponentialSmoothing( + series, initialization_method="known", initial_level=20.0 + ) + es_fit = es.fit() + es_fit.params + assert_allclose(es_fit.params["initial_level"], 20.0) + + +def test_all_initial_values(): + fit1 = ExponentialSmoothing( + aust, + seasonal_periods=4, + trend="add", + seasonal="mul", + initialization_method="estimated", + ).fit() + lvl = np.round(fit1.params["initial_level"]) + trend = np.round(fit1.params["initial_trend"], 1) + seas = 
np.round(fit1.params["initial_seasons"], 1) + fit2 = ExponentialSmoothing( + aust, + seasonal_periods=4, + trend="add", + seasonal="mul", + initialization_method="known", + initial_level=lvl, + initial_trend=trend, + initial_seasonal=seas, + ).fit() + assert_allclose(fit2.params["initial_level"], lvl) + assert_allclose(fit2.params["initial_trend"], trend) + assert_allclose(fit2.params["initial_seasons"], seas)
initial_level doesn't appear to be respected in ExponentialSmoothing class #### Describe the bug [A clear and concise description of what the bug is. This should explain **why** the current behaviour is a problem and why the expected output is a better solution.] #### Code Sample, a copy-pastable example if possible ```python from statsmodels.tsa.holtwinters import ExponentialSmoothing series = [0.0, 0.0, 0.0, 100.0, 0.0, 0.0, 0.0] es = ExponentialSmoothing( series, initialization_method='known', initial_level=20.0 ) es_fit = es.fit() es_fit.params ``` Outputs: ```python {'smoothing_level': 1.4901161193847656e-08, 'smoothing_trend': nan, 'smoothing_seasonal': nan, 'damping_trend': nan, 'initial_level': 14.239682618110056, 'initial_trend': nan, 'initial_seasons': array([], dtype=float64), 'use_boxcox': False, 'lamda': None, 'remove_bias': False} ``` <details> **Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates. **Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `main`. If your problem has been fixed in an unreleased version, you might be able to use `main` until a new release occurs. **Note**: If you are using a released version, have you verified that the bug exists in the main branch of this repository? It helps the limited resources if we know problems exist in the current main branch so that they do not need to check whether the code sample produces a bug in the next release. </details> If the issue has not been resolved, please file it in the issue tracker. 
#### Expected Output `es_fit.params` should return 20.0 in the `initial_level` field Additionally, when calling `es_fit.fittedvalues` I was expecting the first returned value to be 20.0 #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> [paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line] INSTALLED VERSIONS ------------------ Python: 3.10.6.final.0 OS: Linux 5.10.147-133.644.amzn2.x86_64 #1 SMP Fri Oct 14 01:16:24 UTC 2022 x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.13.2 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/statsmodels) Required Dependencies ===================== cython: 0.29.32 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/Cython) numpy: 1.22.4 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/numpy) scipy: 1.9.3 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/scipy) pandas: 1.5.1 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/pandas) dateutil: 2.8.2 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/dateutil) patsy: 0.5.3 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.6.1 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/matplotlib) backend: module://matplotlib_inline.backend_inline cvxopt: Not installed joblib: 1.2.0 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/joblib) Developer Tools ================ IPython: 7.33.0 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/IPython) jinja2: 3.1.2 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/jinja2) sphinx: 5.3.0 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/sphinx) pygments: 2.13.0 (/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/pygments) pytest: 7.2.0 
(/home/ec2-user/anaconda3/envs/python3/lib/python3.10/site-packages/pytest) virtualenv: Not installed </details>
Thanks for the report. In the meantime, you might consider using `ETSModel` which is the most recent/supported exponential smoothing model.
"2023-04-24T11:33:50Z"
0.14
[ "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_ndarray", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal_add_mul", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_hw_seasonal_buggy", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt_damp_fit", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_simple_exp_smoothing", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt_damp_r", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_holt", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::TestHoltWinters::test_predict", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_summary_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[SLSQP]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_seasonal[mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[bootstrap-1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[range]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_boxcox", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_valid_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fixed_basic", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_integer_array", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[TNC]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_dampen", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-mul]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[Powell]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_attributes", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_different_inputs", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[basinhopping]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_to_restricted_equiv[params1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[irregular]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[L-BFGS-B]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_infer_freq", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[None-1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal2]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-add-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[bootstrap-10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_index", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_error_initialization", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_no_params_to_optimize", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[least_squares]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-mul]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[2000]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_damping_trend_zero", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_alternative_minimizers[trust-constr]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-None-legacy-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_direct_holt_add", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[100]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-mul-add]", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index[1000]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[period]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_start_param_length", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[10-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-None-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_index_types[date_range]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_brute[add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fixed_errors", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_boxcox_components", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fix_set_parameters", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_negative_multipliative[trend_seasonal0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_dampen_no_trend[add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-True-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[add-False-add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_bad_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_keywords", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_forecast_1_simulation[None-10]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params1]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[add-None-estimated]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-False-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_debiased", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[None-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_fix_unfixable", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_minimizer_kwargs_error", 
"statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_infeasible_bounds", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_simulate_expected_r[mul-True-add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_restricted_round_tip[params0]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-mul]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_float_boxcox[mul-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_estimated_initialization_short_data[9-add-add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_basin_hopping", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_equivalence_cython_python[add-None]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_invalid_seasonal[add]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_2d_data", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initialization_methods[None-add-heuristic]", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_start_params[None-add]" ]
[ "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_initial_level", "statsmodels/tsa/holtwinters/tests/test_holtwinters.py::test_all_initial_values" ]
Python
[]
[]
statsmodels/statsmodels
8,834
statsmodels__statsmodels-8834
[ "8628" ]
4fee62a4633cfa8ec5db1909fbff5cae572e922e
diff --git a/statsmodels/discrete/discrete_model.py b/statsmodels/discrete/discrete_model.py --- a/statsmodels/discrete/discrete_model.py +++ b/statsmodels/discrete/discrete_model.py @@ -522,7 +522,7 @@ def predict(self, params, exog=None, which="mean", linear=None, """ if linear is not None: msg = 'linear keyword is deprecated, use which="linear"' - warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) if linear is True: which = "linear" @@ -752,7 +752,7 @@ def predict(self, params, exog=None, which="mean", linear=None): """ if linear is not None: msg = 'linear keyword is deprecated, use which="linear"' - warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) if linear is True: which = "linear" @@ -1012,7 +1012,7 @@ def predict(self, params, exog=None, exposure=None, offset=None, """ if linear is not None: msg = 'linear keyword is deprecated, use which="linear"' - warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) if linear is True: which = "linear" @@ -1634,7 +1634,7 @@ def predict(self, params, exog=None, exposure=None, offset=None, if linear is not None: msg = 'linear keyword is deprecated, use which="linear"' - warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) if linear is True: which = "linear" @@ -3584,7 +3584,7 @@ def predict(self, params, exog=None, exposure=None, offset=None, if linear is not None: msg = 'linear keyword is deprecated, use which="linear"' - warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) if linear is True: which = "linear" diff --git a/statsmodels/genmod/generalized_linear_model.py b/statsmodels/genmod/generalized_linear_model.py --- a/statsmodels/genmod/generalized_linear_model.py +++ b/statsmodels/genmod/generalized_linear_model.py @@ -897,7 +897,7 @@ def predict(self, params, exog=None, exposure=None, offset=None, """ if linear is not None: msg = 'linear keyword is deprecated, use which="linear"' - warnings.warn(msg, 
DeprecationWarning) + warnings.warn(msg, FutureWarning) if linear is True: which = "linear" diff --git a/statsmodels/sandbox/stats/runs.py b/statsmodels/sandbox/stats/runs.py --- a/statsmodels/sandbox/stats/runs.py +++ b/statsmodels/sandbox/stats/runs.py @@ -477,7 +477,7 @@ def cochrans_q(x): ''' - warnings.warn("Deprecated, use stats.cochrans_q instead", DeprecationWarning) + warnings.warn("Deprecated, use stats.cochrans_q instead", FutureWarning) x = np.asarray(x) gruni = np.unique(x) @@ -539,7 +539,7 @@ def mcnemar(x, y=None, exact=True, correction=True): ''' - warnings.warn("Deprecated, use stats.TableSymmetry instead", DeprecationWarning) + warnings.warn("Deprecated, use stats.TableSymmetry instead", FutureWarning) x = np.asarray(x) if y is None and x.shape[0] == x.shape[1]: @@ -605,7 +605,7 @@ def symmetry_bowker(table): ''' - warnings.warn("Deprecated, use stats.TableSymmetry instead", DeprecationWarning) + warnings.warn("Deprecated, use stats.TableSymmetry instead", FutureWarning) table = np.asarray(table) k, k2 = table.shape diff --git a/statsmodels/stats/_diagnostic_other.py b/statsmodels/stats/_diagnostic_other.py --- a/statsmodels/stats/_diagnostic_other.py +++ b/statsmodels/stats/_diagnostic_other.py @@ -203,7 +203,7 @@ def dispersion_poisson(results): alternative hypothesis. 
""" msg = 'dispersion_poisson here is deprecated, use ' - warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) from statsmodels.discrete._diagnostics_count import test_poisson_dispersion return test_poisson_dispersion(results, _old=True) @@ -226,7 +226,7 @@ def dispersion_poisson_generic(results, exog_new_test, exog_new_control=None, Warning: insufficiently tested, especially for options """ msg = 'dispersion_poisson here is deprecated, use ' - warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) from statsmodels.discrete._diagnostics_count import ( _test_poisson_dispersion_generic diff --git a/statsmodels/stats/outliers_influence.py b/statsmodels/stats/outliers_influence.py --- a/statsmodels/stats/outliers_influence.py +++ b/statsmodels/stats/outliers_influence.py @@ -609,7 +609,7 @@ def _get_prediction(self): with warnings.catch_warnings(): msg = 'linear keyword is deprecated, use which="linear"' warnings.filterwarnings("ignore", message=msg, - category=DeprecationWarning) + category=FutureWarning) pred = self.results.get_prediction() return pred diff --git a/statsmodels/stats/rates.py b/statsmodels/stats/rates.py --- a/statsmodels/stats/rates.py +++ b/statsmodels/stats/rates.py @@ -752,7 +752,7 @@ def test_poisson_2indep(count1, exposure1, count2, exposure2, value=None, value = ratio_null = 1 elif ratio_null is not None: warnings.warn("'ratio_null' is deprecated, use 'value' keyword", - DeprecationWarning) + FutureWarning) value = ratio_null else: # for results holder instance, it still contains ratio_null @@ -979,7 +979,7 @@ def etest_poisson_2indep(count1, exposure1, count2, exposure2, ratio_null=None, value = 1 elif ratio_null is not None: warnings.warn("'ratio_null' is deprecated, use 'value' keyword", - DeprecationWarning) + FutureWarning) value = ratio_null r = value # rate1 / rate2 @@ -1037,7 +1037,7 @@ def stat_func(x1, x2): stat_sample = stat_func(y1, y2) if ygrid is not None: - warnings.warn("ygrid is 
deprecated, use y_grid", DeprecationWarning) + warnings.warn("ygrid is deprecated, use y_grid", FutureWarning) y_grid = y_grid if y_grid is not None else ygrid # The following uses a fixed truncation for evaluating the probabilities diff --git a/statsmodels/tsa/base/tsa_model.py b/statsmodels/tsa/base/tsa_model.py --- a/statsmodels/tsa/base/tsa_model.py +++ b/statsmodels/tsa/base/tsa_model.py @@ -433,7 +433,7 @@ def get_prediction_index( " version, calling this method in a model" " without a supported index will result in an" " exception.", - DeprecationWarning, + FutureWarning, stacklevel=2, ) elif index_none: diff --git a/statsmodels/tsa/statespace/simulation_smoother.py b/statsmodels/tsa/statespace/simulation_smoother.py --- a/statsmodels/tsa/statespace/simulation_smoother.py +++ b/statsmodels/tsa/statespace/simulation_smoother.py @@ -626,7 +626,7 @@ def simulate(self, simulation_output=-1, msg = ('`disturbance_variates` keyword is deprecated, use' ' `measurement_disturbance_variates` and' ' `state_disturbance_variates` instead.') - warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) if (measurement_disturbance_variates is not None or state_disturbance_variates is not None): raise ValueError('Cannot use `disturbance_variates` in' @@ -642,7 +642,7 @@ def simulate(self, simulation_output=-1, msg = ('`pretransformed` keyword is deprecated, use' ' `pretransformed_measurement_disturbance_variates` and' ' `pretransformed_state_disturbance_variates` instead.') - warnings.warn(msg, DeprecationWarning) + warnings.warn(msg, FutureWarning) if (pretransformed_measurement_disturbance_variates is not None or pretransformed_state_disturbance_variates is not None): raise ValueError(
diff --git a/statsmodels/genmod/tests/test_glm.py b/statsmodels/genmod/tests/test_glm.py --- a/statsmodels/genmod/tests/test_glm.py +++ b/statsmodels/genmod/tests/test_glm.py @@ -990,7 +990,7 @@ def test_predict(self): # Check that offset shifts the linear predictor mod3 = GLM(endog, exog, family=sm.families.Poisson()).fit() offset = np.random.uniform(1, 2, 10) - with pytest.warns(DeprecationWarning): + with pytest.warns(FutureWarning): # deprecation warning for linear keyword pred1 = mod3.predict(exog=exog1, offset=offset, linear=True) pred2 = mod3.predict(exog=exog1, offset=2*offset, which="linear") diff --git a/statsmodels/stats/tests/test_nonparametric.py b/statsmodels/stats/tests/test_nonparametric.py --- a/statsmodels/stats/tests/test_nonparametric.py +++ b/statsmodels/stats/tests/test_nonparametric.py @@ -92,22 +92,22 @@ def test_mcnemar_chisquare(): def test_mcnemar_vectorized(reset_randomstate): ttk = np.random.randint(5,15, size=(2,2,3)) - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): res = sbmcnemar(ttk, exact=False) - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=False) for i in range(3)]) assert_allclose(res, res1, rtol=1e-13) - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): res = sbmcnemar(ttk, exact=False, correction=False) - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=False, correction=False) for i in range(3)]) assert_allclose(res, res1, rtol=1e-13) - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): res = sbmcnemar(ttk, exact=True) - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=True) for i in range(3)]) assert_allclose(res, res1, rtol=1e-13) @@ -171,7 +171,7 @@ def test_cochransq(): #equivalence of mcnemar and cochranq for 2 samples a,b = x[:,:2].T res = cochrans_q(x[:, :2]) - with 
pytest.deprecated_call(): + with pytest.warns(FutureWarning): assert_almost_equal(sbmcnemar(a, b, exact=False, correction=False), [res.statistic, res.pvalue]) diff --git a/statsmodels/stats/tests/test_rates_poisson.py b/statsmodels/stats/tests/test_rates_poisson.py --- a/statsmodels/stats/tests/test_rates_poisson.py +++ b/statsmodels/stats/tests/test_rates_poisson.py @@ -360,7 +360,7 @@ def test_twosample_poisson(): assert_allclose(pv2, pv2r*2, rtol=0, atol=5e-4) assert_allclose(s2, 0.7056, atol=0, rtol=5e-4) - with pytest.warns(DeprecationWarning): + with pytest.warns(FutureWarning): s2, pv2 = smr.test_poisson_2indep(count1, n1, count2, n2, method='score-log', ratio_null=1.5) pv2r = 0.2303 @@ -831,7 +831,7 @@ def test_invalid_y_grid(): with warnings.catch_warnings(record=True) as w: etest_poisson_2indep(1, 1, 1, 1, ygrid=[1]) assert len(w) == 1 - assert issubclass(w[0].category, DeprecationWarning) + assert issubclass(w[0].category, FutureWarning) assert "ygrid" in str(w[0].message) # check y_grid validity diff --git a/statsmodels/tsa/statespace/tests/test_simulation_smoothing.py b/statsmodels/tsa/statespace/tests/test_simulation_smoothing.py --- a/statsmodels/tsa/statespace/tests/test_simulation_smoothing.py +++ b/statsmodels/tsa/statespace/tests/test_simulation_smoothing.py @@ -710,7 +710,7 @@ def test_deprecated_arguments_univariate(): initial_state_variates=np.zeros(1)) desired = sim.simulated_state[0] - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): sim.simulate(disturbance_variates=np.r_[mds, sds], initial_state_variates=np.zeros(1)) actual = sim.simulated_state[0] @@ -724,7 +724,7 @@ def test_deprecated_arguments_univariate(): pretransformed_state_disturbance_variates=True) desired = sim.simulated_state[0] - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): sim.simulate(measurement_disturbance_variates=mds, state_disturbance_variates=sds, pretransformed=True, @@ -756,7 +756,7 @@ def 
test_deprecated_arguments_multivariate(): initial_state_variates=np.zeros(2)) desired = sim.simulated_state[0] - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): sim.simulate(disturbance_variates=np.r_[mds.ravel(), sds.ravel()], initial_state_variates=np.zeros(2)) actual = sim.simulated_state[0] @@ -770,7 +770,7 @@ def test_deprecated_arguments_multivariate(): pretransformed_state_disturbance_variates=True) desired = sim.simulated_state[0] - with pytest.deprecated_call(): + with pytest.warns(FutureWarning): sim.simulate(measurement_disturbance_variates=mds, state_disturbance_variates=sds, pretransformed=True,
FAQ-D/MAINT Use FutureWarning instead of DeprecationWarning for user deprecation warning (I'm late to the story, see pep-565 for python 3.7) DeprecationWarnings are not displayed to users, e.g. don't show up in notebooks. FutureWarnings are not silenced by default, and show up in notebooks. Most of our deprecations affect the user directly, so they need to be FutureWarnings. We might have to convert some DeprecationWarnings to FutureWarnings for example - predict `linear` is currently DeprecationWarnings (in discrete, genmod, stats) - recently added DeprecationWarnings in `stats`, 3 modules - I don't know about DeprectionWarnings in tsa - I didn't search for DeprecationWarnings that are indirectly through decorators
"2023-04-25T09:11:10Z"
0.14
[ "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[sqrt-cent]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[exact-c]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[jeff]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_test[score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_test[midp-c]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_test[wald]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_test[sqrt-v]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[sqrt-a]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_test[waldccv]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[waldccv]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[midp-c]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_test[sqrt]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_test[sqrt-a]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[sqrt]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_test[exact-c]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[sqrt-v]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[wald]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompar1samp::test_confint[sqrt-centcc]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_consistency[exact-c]", "statsmodels/stats/tests/test_rates_poisson.py::test_tol_int[case2]", 
"statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_consistency[sqrt-a]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_consistency[score]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_consistency[wald]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_consistency[sqrt]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_consistency[waldccv]", "statsmodels/stats/tests/test_rates_poisson.py::test_tol_int[case1]", "statsmodels/stats/tests/test_rates_poisson.py::test_tol_int[case0]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_consistency[midp-c]", "statsmodels/stats/tests/test_rates_poisson.py::test_tol_int[case3]", "statsmodels/stats/tests/test_rates_poisson.py::test_tol_int[case4]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_r", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_consistency[sqrt-v]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case16]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_diff_consistency[waldccv]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case3]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case6]", "statsmodels/stats/tests/test_rates_poisson.py::test_tost_poisson", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_ratio_consistency[wald-log]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_diff_consistency[score]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_diff_consistency[wald]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_diff_ratio_consistency[score]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case13]", "statsmodels/stats/tests/test_rates_poisson.py::test_twosample_poisson_diff[case1]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case0]", 
"statsmodels/stats/tests/test_rates_poisson.py::test_twosample_poisson_diff[case0]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case12]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case8]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case11]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case15]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case1]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case7]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_diff_ratio_consistency[etest]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_diff_ratio_consistency[wald]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case19]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case10]", "statsmodels/stats/tests/test_rates_poisson.py::test_twosample_poisson_r", "statsmodels/stats/tests/test_rates_poisson.py::test_twosample_poisson_diff[case3]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case20]", "statsmodels/stats/tests/test_rates_poisson.py::test_twosample_poisson_diff[case2]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_ratio_consistency[score-log]", "statsmodels/stats/tests/test_rates_poisson.py::test_rate_poisson_diff_ratio_consistency[etest-wald]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case18]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case14]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case17]", "statsmodels/stats/tests/test_rates_poisson.py::test_confint_poisson_2indep", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case5]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case4]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case2]", "statsmodels/stats/tests/test_rates_poisson.py::test_alternative[case9]", 
"statsmodels/stats/tests/test_rates_poisson.py::test_power_poisson_equal", "statsmodels/stats/tests/test_rates_poisson.py::test_y_grid_regression", "statsmodels/stats/tests/test_rates_poisson.py::test_poisson_power_2ratio", "statsmodels/stats/tests/test_rates_poisson.py::test_power_negbin", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[ratio-sqrt]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[diff-etest-wald]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[ratio-score-log]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[diff-score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[ratio-cond-midp]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[ratio-cond-midp]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[ratio-wald-log]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[ratio-wald]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[diff-wald]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[ratio-etest-score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[diff-wald]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[diff-waldccv]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[ratio-score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[diff-wald]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[diff-waldccv]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[ratio-mover]", 
"statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[ratio-exact-cond]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[ratio-wald-log]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[ratio-waldcc]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[ratio-score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[ratio-score-log]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[diff-etest-score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[diff-score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[ratio-exact-cond]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[diff-mover]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[diff-waldccv]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test_vectorized[ratio-score-log]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[ratio-score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[ratio-sqrtcc]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[ratio-etest-wald]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_confint[diff-score]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[ratio-wald]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[ratio-sqrt]", "statsmodels/stats/tests/test_rates_poisson.py::TestMethodsCompare2indep::test_test[ratio-wald-log]", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingMixed::test_simulate_1", 
"statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingMixed::test_simulate_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingMixed::test_simulation_smoothing_1", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingMixed::test_simulation_smoothing_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingMixed::test_simulation_smoothing_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingMixed::test_simulate_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingMixed::test_loglike", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestDFM::test_simulation_smoothing_1", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestDFM::test_simulate_1", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestDFM::test_simulate_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestDFM::test_simulation_smoothing_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestDFM::test_simulation_smoothing_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestDFM::test_loglike", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestDFM::test_simulate_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::test_nan", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::test_simulation_smoothing_obs_intercept", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::test_misc", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::test_simulation_smoothing_state_intercept", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::test_simulation_smoothing_state_intercept_diffuse", 
"statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnown::test_simulate_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnown::test_simulation_smoothing_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnown::test_simulation_smoothing_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnown::test_simulate_1", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnown::test_simulate_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnown::test_simulation_smoothing_1", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnown::test_loglike", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingAll::test_loglike", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingAll::test_simulation_smoothing_1", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingAll::test_simulate_1", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingAll::test_simulate_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingAll::test_simulation_smoothing_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingAll::test_simulation_smoothing_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingAll::test_simulate_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingPartial::test_simulate_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingPartial::test_loglike", 
"statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingPartial::test_simulate_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingPartial::test_simulation_smoothing_2", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingPartial::test_simulation_smoothing_0", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingPartial::test_simulate_1", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVARKnownMissingPartial::test_simulation_smoothing_1", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVAR::test_loglike", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::TestMultivariateVAR::test_simulation_smoothing", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_summary2", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmNegativeBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_fittedvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmInverseGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianInverse::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_standard_errors", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGaussianLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_null_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_X2::test_standard_errors", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgaussIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_Stata", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussianGradient::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_missing", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaScale_dev::test_loglike", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonHC0::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweedieLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_offset_exposure", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_standard_errors", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_missing", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestRegularized::test_regularized", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_prsquared", 
"statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmNegbinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution_binom_count", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog_formula", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_invalid_endog", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_endog_dtype", 
"statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_r", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_get_prediction", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmBernoulli::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_bic", 
"statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower2::test_loglike", "statsmodels/genmod/tests/test_glm.py::test_attribute_writable_resettable", "statsmodels/genmod/tests/test_glm.py::test_perfect_pred", "statsmodels/genmod/tests/test_glm.py::test_score_test_ols", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls", "statsmodels/genmod/tests/test_glm.py::test_summary", "statsmodels/genmod/tests/test_glm.py::test_formula_missing_exposure", "statsmodels/genmod/tests/test_glm.py::test_loglike_no_opt", "statsmodels/genmod/tests/test_glm.py::test_plots", "statsmodels/genmod/tests/test_glm.py::test_gradient_irls_eim", "statsmodels/genmod/tests/test_glm.py::test_glm_start_params", "statsmodels/genmod/tests/test_glm.py::test_glm_irls_method", "statsmodels/genmod/tests/test_glm.py::test_wtd_patsy_missing", "statsmodels/genmod/tests/test_glm.py::testTweediePowerEstimate", "statsmodels/genmod/tests/test_glm.py::test_glm_lasso_6431", "statsmodels/genmod/tests/test_glm.py::test_tweedie_elastic_net", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_upper_limit", "statsmodels/genmod/tests/test_glm.py::test_tweedie_EQL_poisson_limit", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int32]", "statsmodels/genmod/tests/test_glm.py::test_glm_bic", "statsmodels/genmod/tests/test_glm.py::test_int_scale", "statsmodels/genmod/tests/test_glm.py::test_output_exposure_null", 
"statsmodels/genmod/tests/test_glm.py::test_int_exog[int64]", "statsmodels/genmod/tests/test_glm.py::test_glm_bic_warning", "statsmodels/genmod/tests/test_glm.py::test_non_invertible_hessian_fails_summary", "statsmodels/genmod/tests/test_glm.py::test_tweedie_score", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int16]", "statsmodels/genmod/tests/test_glm.py::test_qaic", "statsmodels/genmod/tests/test_glm.py::test_poisson_deviance", "statsmodels/genmod/tests/test_glm.py::test_int_exog[int8]", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_compare_discrete", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_get_prediction", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_score_test", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_get_distribution", 
"statsmodels/genmod/tests/test_glm.py::TestGlmPoisson::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmBinomial::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_summary", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaIdentity::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower15::test_summary", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_only", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_rtol_only_params", "statsmodels/genmod/tests/test_glm.py::TestConvergence::test_convergence_atol_rtol", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog1::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_tpvalues", 
"statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdTweediePower15::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_get_distribution", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGammaLog::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_init_kwargs", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_bic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmGammaNewton::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_scale", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_summary", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_bic", 
"statsmodels/genmod/tests/test_glm.py::TestStartParams::test_params", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestStartParams::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog3::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog2::test_mu", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_compare_OLS", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_get_distribution", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGaussian::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_aic", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_scale", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_params", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestWtdGlmPoissonClu::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_get_distribution", 
"statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_null_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmInvgauss::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_bse", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweediePower2::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_mu", "statsmodels/genmod/tests/test_glm.py::TestTweedieSpecialLog0::test_resid", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_tpvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_degrees", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_scale", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_standard_errors", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_loglike", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_bic", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_get_distribution", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_null_deviance", 
"statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_params", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_Stata", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_residuals", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_summary", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_pearson_chi2", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_aic_R", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestGlmGamma::test_prsquared", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_summary", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_fittedvalues", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_resid", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_df", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_params", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_deviance", "statsmodels/genmod/tests/test_glm.py::TestTweedieLog15Fair::test_bse", "statsmodels/stats/tests/test_nonparametric.py::test_brunnermunzel_two_sided", "statsmodels/stats/tests/test_nonparametric.py::test_runstest", "statsmodels/stats/tests/test_nonparametric.py::test_brunnermunzel_one_sided", "statsmodels/stats/tests/test_nonparametric.py::test_mcnemar_chisquare", "statsmodels/stats/tests/test_nonparametric.py::test_runstest_2sample", "statsmodels/stats/tests/test_nonparametric.py::test_cochransq3", "statsmodels/stats/tests/test_nonparametric.py::test_symmetry_bowker", "statsmodels/stats/tests/test_nonparametric.py::test_rank_compare_2indep1", "statsmodels/stats/tests/test_nonparametric.py::test_rank_compare_vectorized", "statsmodels/stats/tests/test_nonparametric.py::test_cochransq2", 
"statsmodels/stats/tests/test_nonparametric.py::test_mcnemar_exact", "statsmodels/stats/tests/test_nonparametric.py::test_rank_compare_ord" ]
[ "statsmodels/stats/tests/test_rates_poisson.py::test_twosample_poisson", "statsmodels/stats/tests/test_rates_poisson.py::test_invalid_y_grid", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::test_deprecated_arguments_univariate", "statsmodels/tsa/statespace/tests/test_simulation_smoothing.py::test_deprecated_arguments_multivariate", "statsmodels/genmod/tests/test_glm.py::TestGlmPoissonOffset::test_predict", "statsmodels/stats/tests/test_nonparametric.py::test_cochransq", "statsmodels/stats/tests/test_nonparametric.py::test_mcnemar_vectorized" ]
Python
[]
[]
statsmodels/statsmodels
8,835
statsmodels__statsmodels-8835
[ "8514" ]
4fee62a4633cfa8ec5db1909fbff5cae572e922e
diff --git a/statsmodels/tsa/seasonal.py b/statsmodels/tsa/seasonal.py --- a/statsmodels/tsa/seasonal.py +++ b/statsmodels/tsa/seasonal.py @@ -342,7 +342,7 @@ def plot( else: xlim = (0, self._observed.shape[0] - 1) - fig, axs = plt.subplots(len(series), 1) + fig, axs = plt.subplots(len(series), 1, sharex=True) for i, (ax, (series, def_name)) in enumerate(zip(axs, series)): if def_name != "residual": ax.plot(series)
diff --git a/statsmodels/tsa/forecasting/tests/test_stl.py b/statsmodels/tsa/forecasting/tests/test_stl.py --- a/statsmodels/tsa/forecasting/tests/test_stl.py +++ b/statsmodels/tsa/forecasting/tests/test_stl.py @@ -40,6 +40,16 @@ def test_smoke(data): assert hasattr(res.model_result, "forecast") [email protected] +def test_sharex(data): + stlf = STLForecast(data, ARIMA, model_kwargs={"order": (2, 0, 0)}) + res = stlf.fit(fit_kwargs={}) + plt = res.result.plot() + grouper_view = plt.axes[0].get_shared_x_axes() + sibs = grouper_view.get_siblings(plt.axes[1]) + assert len(sibs) == 4 + + MODELS = [ (ARIMA, {"order": (2, 0, 0), "trend": "c"}), (ExponentialSmoothing, {"trend": True}),
Use sharex=True in plotting seasonal decomposition This is a tiny change in the `plot` method of `DecomposeResult` (in statsmodels.tsa.seasonal). I started to use it just a few minutes ago on a quite long time series (8760 samples), so that in a interactive Matlplotlib plot, it is useful to zoom horizontally. Unfortunately, the horizontal zooms are not in sync. Adding `sharex=True` in the call to `plt.subplots` allows synchronizing the zoom between all seasonal components. An alternative would be to have this synchronization optional (one extra parameter to `DecomposeResult.plot`), but I don't see a use case which needs *dis*-synchronization, what do you think?
Is there an API call to disable it from an existing plot? If so, could you add a note that sharex is true by default. Axis synchronization can be disabled using ax.set_???? > Is there an API call to disable it from an existing plot? Good question, I hadn't thought of this. It took me a little digging to follow the layers of successive closed Matplotlib Issues and PR: - https://github.com/matplotlib/matplotlib/issues/318 - https://github.com/matplotlib/matplotlib/pull/1312 (2012-2015), not merged - https://github.com/matplotlib/matplotlib/pull/9923 (2017-2020), not merged, with last comment "#15287 does some of this." - https://github.com/matplotlib/matplotlib/pull/15287 (2019-2020), merge, but with the first comment stating that "This intentionally does not allow unsharing or changing the shared axes, as there are bigger questions on the API there (#9923)" So the short answer seems to be "no, it's not possible to unshare axes", but perhaps I missed something cause I read the threads quite fast. Coming back to the display of decomposed time series, I don't think it's a problem though, for two reasons: 1. having synced horizontal axes is a sensible default (or said differently, for me it's a bug that they are currently not synced) 2. `DecomposeResult.plot` is only a quick convenience display for fast [EDA](https://en.wikipedia.org/wiki/Exploratory_data_analysis). There are other plot settings that a user may wish different (like the appearance of the residue), but then it doesn't cost that much to remake the plot from scratch using the decomposed channels (`DecomposeResult.trend`...) Do you want to add a keyword argument so that this is optional? I'm find with True being the default. Sorry for the late reply. > Do you want to add a keyword argument so that this is optional? I'm find with True being the default. This is possible, but is it worth complexifying the API by an extra keyword? 
Of course it's only a small extra complexity, but on the other hand, I don't see a use case for having *unsynced* axes. More generally, there are many other small plot parameters that could be brought to the `DecomposeResult.plot()` method signature, but none can be changed at present, so why just one "sharex=True"?
"2023-04-25T09:24:24Z"
0.14
[ "statsmodels/tsa/forecasting/tests/test_stl.py::test_exceptions", "statsmodels/tsa/forecasting/tests/test_stl.py::test_equivalence_forecast[23-ETSModel]", "statsmodels/tsa/forecasting/tests/test_stl.py::test_equivalence_forecast[7-ETSModel]", "statsmodels/tsa/forecasting/tests/test_stl.py::test_equivalence_forecast[1-ETSModel]", "statsmodels/tsa/forecasting/tests/test_stl.py::test_no_var_pred[False]", "statsmodels/tsa/forecasting/tests/test_stl.py::test_no_var_pred[True]", "statsmodels/tsa/forecasting/tests/test_stl.py::test_get_prediction", "statsmodels/tsa/forecasting/tests/test_stl.py::test_smoke" ]
[ "statsmodels/tsa/forecasting/tests/test_stl.py::test_sharex" ]
Python
[]
[]
statsmodels/statsmodels
8,887
statsmodels__statsmodels-8887
[ "8879" ]
12e0284e8eee9610f35a471749b6ea5c79e49a52
diff --git a/statsmodels/stats/_adnorm.py b/statsmodels/stats/_adnorm.py --- a/statsmodels/stats/_adnorm.py +++ b/statsmodels/stats/_adnorm.py @@ -5,6 +5,8 @@ Author: Josef Perktold and Scipy developers License : BSD-3 """ +import warnings + import numpy as np from scipy import stats @@ -64,8 +66,12 @@ def anderson_statistic(x, dist='norm', fit=True, params=(), axis=0): sl2 = [slice(None)] * x.ndim sl2[axis] = slice(None, None, -1) sl2 = tuple(sl2) - s = np.sum((2 * i[sl1] - 1.0) / nobs * (np.log(z) + np.log1p(-z[sl2])), - axis=axis) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", message="divide by zero encountered in log1p" + ) + ad_values = (2 * i[sl1] - 1.0) / nobs * (np.log(z) + np.log1p(-z[sl2])) + s = np.sum(ad_values, axis=axis) a2 = -nobs - s return a2 diff --git a/statsmodels/stats/diagnostic.py b/statsmodels/stats/diagnostic.py --- a/statsmodels/stats/diagnostic.py +++ b/statsmodels/stats/diagnostic.py @@ -1072,7 +1072,7 @@ def linear_reset(res, power=3, test_type="fitted", use_f=False, raise ValueError("power must contains distinct integers all >= 2") exog = res.model.exog if test_type == "fitted": - aug = res.fittedvalues[:, None] + aug = np.asarray(res.fittedvalues)[:, None] elif test_type == "exog": # Remove constant and binary aug = res.model.exog @@ -1293,7 +1293,7 @@ def linear_lm(resid, exog, func=None): if func is None: def func(x): return np.power(x, 2) - + exog = np.asarray(exog) exog_aux = np.column_stack((exog, func(exog[:, 1:]))) nobs, k_vars = exog.shape
diff --git a/statsmodels/stats/tests/test_diagnostic.py b/statsmodels/stats/tests/test_diagnostic.py --- a/statsmodels/stats/tests/test_diagnostic.py +++ b/statsmodels/stats/tests/test_diagnostic.py @@ -1957,3 +1957,43 @@ def test_small_skip(reset_randomstate): # M2 + fit(M1)-exp(fit(M2)) < 2.22e-16 *** # --- # Signif. codes: 0 β€˜***’ 0.001 β€˜**’ 0.01 β€˜*’ 0.05 β€˜.’ 0.1 β€˜ ’ 1 + [email protected] +def test_diagnostics_pandas(reset_randomstate): + # GH 8879 + n = 100 + df = pd.DataFrame( + { + "y": np.random.rand(n), + "x": np.random.rand(n), + "z": np.random.rand(n)} + ) + y, x = df["y"], add_constant(df["x"]) + + res = OLS(df["y"], add_constant(df[["x"]])).fit() + res_large = OLS(df["y"], add_constant(df[["x", "z"]])).fit() + res_other = OLS(df["y"], add_constant(df[["z"]])).fit() + smsdia.linear_reset(res_large) + smsdia.linear_reset(res_large, test_type="fitted") + smsdia.linear_reset(res_large, test_type="exog") + smsdia.linear_reset(res_large, test_type="princomp") + smsdia.het_goldfeldquandt(y, x) + smsdia.het_breuschpagan(res.resid, x) + smsdia.het_white(res.resid, x) + smsdia.het_arch(res.resid) + smsdia.acorr_breusch_godfrey(res) + smsdia.acorr_ljungbox(y) + smsdia.linear_rainbow(res) + smsdia.linear_lm(res.resid, x) + smsdia.linear_harvey_collier(res) + smsdia.acorr_lm(res.resid) + smsdia.breaks_cusumolsresid(res.resid) + smsdia.breaks_hansen(res) + smsdia.compare_cox(res, res_other) + smsdia.compare_encompassing(res, res_other) + smsdia.compare_j(res, res_other) + smsdia.recursive_olsresiduals(res) + smsdia.recursive_olsresiduals( + res, order_by=np.arange(y.shape[0] - 1, 0 - 1, -1) + ) + smsdia.spec_white(res.resid, x)
Multi-dimensional indexing error in statsmodels.stats.diagnostic.linear_reset when using Pandas #### Describe the bug I tried to do a linear RESET test, but I got an error: `ValueError: Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer supported. Convert to a numpy array before indexing instead.` Complete error message down below. #### Code Sample, a copy-pastable example if possible ```python #!/usr/bin/env python3 import numpy as np import pandas as pd import statsmodels.stats.diagnostic as smdiag import statsmodels.api as sm n = 100 df = pd.DataFrame() df["y"] = np.random.rand(n) df["x"] = np.random.rand(n) OLS_results = sm.OLS(df["y"], df["x"]).fit() smdiag.linear_reset(OLS_results) ``` <details> Complete error Traceback: ``` Traceback (most recent call last): File "/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/bug_report.py", line 16, in <module> smdiag.linear_reset(OLS_results) File "/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/pandas/util/_decorators.py", line 210, in wrapper return func(*args, **kwargs) File "/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/statsmodels/stats/diagnostic.py", line 1065, in linear_reset aug = res.fittedvalues[:, None] File "/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/pandas/core/series.py", line 1033, in __getitem__ return self._get_with(key) File "/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/pandas/core/series.py", line 1048, in _get_with return self._get_values_tuple(key) File "/home/jonathan/RietNAS/Opleiding/EUR/1. 
Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/pandas/core/series.py", line 1082, in _get_values_tuple disallow_ndim_indexing(result) File "/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/pandas/core/indexers/utils.py", line 343, in disallow_ndim_indexing raise ValueError( ValueError: Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer supported. Convert to a numpy array before indexing instead. ``` The error does not occur if `y` and `x` are passed as NumPy arrays, only when passed as pandas dataframe. Using `statsmodels.stats.outliers_influence.reset_ramsey` also doesn't give this same problem. </details> If the issue has not been resolved, please file it in the issue tracker. #### Expected Output I expect to get the results of the test, as `statsmodels.stats.outliers_influence import reset_ramsey` does. #### Output of ``import statsmodels.api as sm; sm.show_versions()`` <details> [paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line] INSTALLED VERSIONS ------------------ Python: 3.10.6.final.0 OS: Linux 6.2.6-76060206-generic #202303130630~1681329778~22.04~d824cd4 SMP PREEMPT_DYNAMIC Wed A x86_64 byteorder: little LC_ALL: None LANG: en_US.UTF-8 statsmodels =========== Installed: 0.14.0 (/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/statsmodels) Required Dependencies ===================== cython: Not installed numpy: 1.24.3 (/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/numpy) scipy: 1.10.1 (/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/scipy) pandas: 2.0.1 (/home/jonathan/RietNAS/Opleiding/EUR/1. 
Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/pandas) dateutil: 2.8.2 (/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/dateutil) patsy: 0.5.3 (/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/patsy) Optional Dependencies ===================== matplotlib: 3.7.1 (/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/matplotlib) backend: agg cvxopt: Not installed joblib: 1.2.0 (/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/joblib) Developer Tools ================ IPython: 8.13.2 (/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/IPython) jinja2: Not installed sphinx: Not installed pygments: 2.15.1 (/home/jonathan/RietNAS/Opleiding/EUR/1. Pre-master/FEB22009S Seminar Case Study/.venv/lib/python3.10/site-packages/pygments) pytest: Not installed virtualenv: Not installed </details>
looks like a bug with pandas compatibility The function should use the unwrapped results instance internally. `smdiag.linear_reset(OLS_results._results)` should have worked as workaround, however it breaks on the isinstance check. So the only workaround I see right now (except for monkey patching fittedvalues) is to reestimate the model without pandas: ``` res_np = sm.OLS(OLS_results.model.endog, OLS_results.model.exog).fit() smdiag.linear_reset(res_np) <class 'statsmodels.stats.contrast.ContrastResults'> <Wald test (chi2): statistic=63.544945204592665, p-value=1.5899756467736924e-14, df_denom=2> ``` Yeah that sounds like a fair workaround. I'm not sure what the internals of this looks like, but a call `y = np.array(y)` somewhere in `linear_reset` probably fixes it, but I'll leave it to the devs to devise a better solution (: I have the same issue for `.compare_lm_test()`. ``` import numpy as np import pandas as pd import statsmodels.formula.api as smf n = 100 df = pd.DataFrame() for v in ["y","x","z","k"]: df[v] = np.random.rand(n) res0 = smf.ols("y ~ x + z + k", data=df).fit() res1 = smf.ols("y ~ x", data=df).fit() res0.compare_lm_test(res1) ``` The above code generates the following error ``` ValueError: Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer supported. Convert to a numpy array before indexing instead. ``` Python: 3.10.11 pandas: 2.0.1 statsmodels 0.14.0 You can downgrade your pandas as well to get around this. Should be fixed in next point release.
"2023-05-18T08:15:50Z"
0.14
[ "statsmodels/stats/tests/test_diagnostic.py::test_gq", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by2-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by1-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_period", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_auto_lag_whitenoise", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_centered[300]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-2-False]", "statsmodels/stats/tests/test_diagnostic.py::test_influence_wrapped", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-2-False]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-2-True]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-princomp-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by4-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_errors_warnings", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-0-True]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-exog-3]", 
"statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-princomp-3]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-princomp-2]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[x0-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-0-False]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by2-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by2-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_small_skip", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-0-True]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[None-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke_no_autolag", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by1-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_influence_dtype", "statsmodels/stats/tests/test_diagnostic.py::test_linear_lm_direct", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_centered[0.33]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by4-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-True-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by4-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-princomp-2]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_centered[None]", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_auto_lag_selection", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[None-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-princomp-2]", 
"statsmodels/stats/tests/test_diagnostic.py::test_outlier_influence_funcs", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[None-0.5]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_exception", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-fitted-3]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-exog-3]", "statsmodels/stats/tests/test_diagnostic.py::test_spec_white_error", "statsmodels/stats/tests/test_diagnostic.py::test_encompasing_direct[nonrobust]", "statsmodels/stats/tests/test_diagnostic.py::test_outlier_test", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[x0-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[x0-0.25]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov1-False-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-True-fitted-2]", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov0-2-True]", "statsmodels/stats/tests/test_diagnostic.py::test_rainbow_smoke_order_by[order_by1-0.75]", "statsmodels/stats/tests/test_diagnostic.py::test_spec_white", "statsmodels/stats/tests/test_diagnostic.py::test_reset_smoke[cov0-False-exog-2]", "statsmodels/stats/tests/test_diagnostic.py::test_encompasing_error", "statsmodels/stats/tests/test_diagnostic.py::test_ljungbox_dof_adj", "statsmodels/stats/tests/test_diagnostic.py::test_acorr_lm_smoke[cov1-0-False]", "statsmodels/stats/tests/test_diagnostic.py::test_encompasing_direct[HC0]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_breusch_godfrey_exogs", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_error[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_basic", 
"statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box_small_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_influence", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_breusch_pagan", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_error[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_breusch_godfrey_multidim", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_nested[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_arch", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_arch2", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_breusch_pagan_nonrobust", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_nested[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_harvey_collier", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box_against_r", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_breusch_godfrey", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_cusum_ols", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_white", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_j", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_breusch_pagan_1d_err", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_normality", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_goldfeldquandt", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_het_white_error", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_breaks_hansen", 
"statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_cox", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_acorr_ljung_box_big_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_rainbow", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_hac", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_recursive_residuals", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticG::test_compare_lr", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_lr", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box_against_r", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_breusch_pagan_nonrobust", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_goldfeldquandt", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_breusch_pagan", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_breaks_hansen", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box_big_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_arch", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_error[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_breusch_godfrey_multidim", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_ljung_box_small_default", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_arch2", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_white_error", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_cox", 
"statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_nested[compare_j]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_breusch_pagan_1d_err", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_nested[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_recursive_residuals", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_breusch_godfrey", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_basic", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_het_white", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_normality", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_cusum_ols", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_j", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_compare_error[compare_cox]", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_acorr_breusch_godfrey_exogs", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_hac", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_influence", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_rainbow", "statsmodels/stats/tests/test_diagnostic.py::TestDiagnosticGPandas::test_harvey_collier" ]
[ "statsmodels/stats/tests/test_diagnostic.py::test_diagnostics_pandas" ]
Python
[]
[]