diff --git a/.gitattributes b/.gitattributes
index 6bf1f98fe26ee0ee6649870f8d85d9fcd9c56b56..8284208572ddff7b83b6a39b3a437112ea31a084 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1634,3 +1634,7 @@ evalkit_tf437/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1800 fi
 falcon/bin/python filter=lfs diff=lfs merge=lfs -text
 evalkit_internvl/lib/python3.10/site-packages/sympy/utilities/tests/__pycache__/test_wester.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 evalkit_internvl/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_tf437/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de4af0501d423defe0dacb7af82dd48388362924
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/matplotlib.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/matplotlib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b935a81c65874614823a27bd2fcee133c5c56ab
Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/backends/matplotlibbackend/__pycache__/matplotlib.cpython-310.pyc differ
diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/__init__.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_experimental_lambdify.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_experimental_lambdify.py
new file mode 100644
index 0000000000000000000000000000000000000000..95839d668762be7be94d0de5092594306ceeadbd
--- /dev/null
+++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_experimental_lambdify.py
@@ -0,0 +1,77 @@
+from sympy.core.symbol import symbols, Symbol
+from sympy.functions import Max
+from sympy.plotting.experimental_lambdify import experimental_lambdify
+from sympy.plotting.intervalmath.interval_arithmetic import \
+    interval, intervalMembership
+
+
+# Tests for exception handling in experimental_lambdify
+def test_experimental_lambify():
+    x = Symbol('x')
+    f = experimental_lambdify([x], Max(x, 5))
+    # XXX should f be tested?
If f(2) is attempted, an + # error is raised because a complex produced during wrapping of the arg + # is being compared with an int. + assert Max(2, 5) == 5 + assert Max(5, 7) == 7 + + x = Symbol('x-3') + f = experimental_lambdify([x], x + 1) + assert f(1) == 2 + + +def test_composite_boolean_region(): + x, y = symbols('x y') + + r1 = (x - 1)**2 + y**2 < 2 + r2 = (x + 1)**2 + y**2 < 2 + + f = experimental_lambdify((x, y), r1 & r2) + a = (interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(False, True) + + f = experimental_lambdify((x, y), r1 | r2) + a = (interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(False, True) + + f = experimental_lambdify((x, y), r1 & ~r2) + a = (interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(False, True) + + f = experimental_lambdify((x, y), ~r1 & r2) + a = (interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(True, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(False, True) + + f = experimental_lambdify((x, y), ~r1 & ~r2) + a = (interval(-0.1, 0.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-1.1, -0.9), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(0.9, 1.1), interval(-0.1, 0.1)) + assert f(*a) == intervalMembership(False, True) + a = (interval(-0.1, 0.1), interval(1.9, 2.1)) + assert f(*a) == intervalMembership(True, True) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_plot.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..bf09e825e7444cfdaf42e8c419dc50170168365b --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_plot.py @@ -0,0 +1,1343 @@ +import os +from tempfile import TemporaryDirectory +import pytest +from sympy.concrete.summations import Sum +from sympy.core.numbers import (I, oo, pi) +from sympy.core.relational import Ne +from sympy.core.symbol import Symbol, symbols +from sympy.functions.elementary.exponential import (LambertW, exp, exp_polar, log) +from sympy.functions.elementary.miscellaneous import (real_root, sqrt) +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.elementary.trigonometric import (cos, sin) +from 
sympy.functions.elementary.miscellaneous import Min +from sympy.functions.special.hyper import meijerg +from sympy.integrals.integrals import Integral +from sympy.logic.boolalg import And +from sympy.core.singleton import S +from sympy.core.sympify import sympify +from sympy.external import import_module +from sympy.plotting.plot import ( + Plot, plot, plot_parametric, plot3d_parametric_line, plot3d, + plot3d_parametric_surface) +from sympy.plotting.plot import ( + unset_show, plot_contour, PlotGrid, MatplotlibBackend, TextBackend) +from sympy.plotting.series import ( + LineOver1DRangeSeries, Parametric2DLineSeries, Parametric3DLineSeries, + ParametricSurfaceSeries, SurfaceOver2DRangeSeries) +from sympy.testing.pytest import skip, warns, raises, warns_deprecated_sympy +from sympy.utilities import lambdify as lambdify_ +from sympy.utilities.exceptions import ignore_warnings + +unset_show() + + +matplotlib = import_module( + 'matplotlib', min_module_version='1.1.0', catch=(RuntimeError,)) + + +class DummyBackendNotOk(Plot): + """ Used to verify if users can create their own backends. + This backend is meant to raise NotImplementedError for methods `show`, + `save`, `close`. + """ + def __new__(cls, *args, **kwargs): + return object.__new__(cls) + + +class DummyBackendOk(Plot): + """ Used to verify if users can create their own backends. + This backend is meant to pass all tests. + """ + def __new__(cls, *args, **kwargs): + return object.__new__(cls) + + def show(self): + pass + + def save(self): + pass + + def close(self): + pass + +def test_basic_plotting_backend(): + x = Symbol('x') + plot(x, (x, 0, 3), backend='text') + plot(x**2 + 1, (x, 0, 3), backend='text') + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_1(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + ### + # Examples from the 'introduction' notebook + ### + p = plot(x, legend=True, label='f1', adaptive=adaptive, n=10) + p = plot(x*sin(x), x*cos(x), label='f2', adaptive=adaptive, n=10) + p.extend(p) + p[0].line_color = lambda a: a + p[1].line_color = 'b' + p.title = 'Big title' + p.xlabel = 'the x axis' + p[1].label = 'straight line' + p.legend = True + p.aspect_ratio = (1, 1) + p.xlim = (-15, 20) + filename = 'test_basic_options_and_colors.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p.extend(plot(x + 1, adaptive=adaptive, n=10)) + p.append(plot(x + 3, x**2, adaptive=adaptive, n=10)[1]) + filename = 'test_plot_extend_append.png' + p.save(os.path.join(tmpdir, filename)) + + p[2] = plot(x**2, (x, -2, 3), adaptive=adaptive, n=10) + filename = 'test_plot_setitem.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot(sin(x), (x, -2*pi, 4*pi), adaptive=adaptive, n=10) + filename = 'test_line_explicit.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot(sin(x), adaptive=adaptive, n=10) + filename = 'test_line_default_range.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)), adaptive=adaptive, n=10) + filename = 'test_line_multiple_range.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + raises(ValueError, lambda: plot(x, y)) + + #Piecewise plots + p = plot(Piecewise((1, x > 0), (0, True)), (x, -1, 1), adaptive=adaptive, n=10) + filename = 'test_plot_piecewise.png' + p.save(os.path.join(tmpdir, filename)) + 
p._backend.close() + + p = plot(Piecewise((x, x < 1), (x**2, True)), (x, -3, 3), adaptive=adaptive, n=10) + filename = 'test_plot_piecewise_2.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # test issue 7471 + p1 = plot(x, adaptive=adaptive, n=10) + p2 = plot(3, adaptive=adaptive, n=10) + p1.extend(p2) + filename = 'test_horizontal_line.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # test issue 10925 + f = Piecewise((-1, x < -1), (x, And(-1 <= x, x < 0)), \ + (x**2, And(0 <= x, x < 1)), (x**3, x >= 1)) + p = plot(f, (x, -3, 3), adaptive=adaptive, n=10) + filename = 'test_plot_piecewise_3.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_2(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + z = Symbol('z') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + #parametric 2d plots. + #Single plot with default range. + p = plot_parametric(sin(x), cos(x), adaptive=adaptive, n=10) + filename = 'test_parametric.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #Single plot with range. + p = plot_parametric( + sin(x), cos(x), (x, -5, 5), legend=True, label='parametric_plot', + adaptive=adaptive, n=10) + filename = 'test_parametric_range.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #Multiple plots with same range. + p = plot_parametric((sin(x), cos(x)), (x, sin(x)), + adaptive=adaptive, n=10) + filename = 'test_parametric_multiple.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #Multiple plots with different ranges. + p = plot_parametric( + (sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)), + adaptive=adaptive, n=10) + filename = 'test_parametric_multiple_ranges.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #depth of recursion specified. + p = plot_parametric(x, sin(x), depth=13, + adaptive=adaptive, n=10) + filename = 'test_recursion_depth.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #No adaptive sampling. + p = plot_parametric(cos(x), sin(x), adaptive=False, n=500) + filename = 'test_adaptive.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + #3d parametric plots + p = plot3d_parametric_line( + sin(x), cos(x), x, legend=True, label='3d_parametric_plot', + adaptive=adaptive, n=10) + filename = 'test_3d_line.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d_parametric_line( + (sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)), + adaptive=adaptive, n=10) + filename = 'test_3d_line_multiple.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d_parametric_line(sin(x), cos(x), x, n=30, + adaptive=adaptive) + filename = 'test_3d_line_points.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # 3d surface single plot. + p = plot3d(x * y, adaptive=adaptive, n=10) + filename = 'test_surface.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple 3D plots with same range. + p = plot3d(-x * y, x * y, (x, -5, 5), adaptive=adaptive, n=10) + filename = 'test_surface_multiple.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple 3D plots with different ranges. 
+ p = plot3d( + (x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)), + adaptive=adaptive, n=10) + filename = 'test_surface_multiple_ranges.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Single Parametric 3D plot + p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y, + adaptive=adaptive, n=10) + filename = 'test_parametric_surface.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple Parametric 3D plots. + p = plot3d_parametric_surface( + (x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)), + (sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)), + adaptive=adaptive, n=10) + filename = 'test_parametric_surface.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Single Contour plot. + p = plot_contour(sin(x)*sin(y), (x, -5, 5), (y, -5, 5), + adaptive=adaptive, n=10) + filename = 'test_contour_plot.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple Contour plots with same range. + p = plot_contour(x**2 + y**2, x**3 + y**3, (x, -5, 5), (y, -5, 5), + adaptive=adaptive, n=10) + filename = 'test_contour_plot.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # Multiple Contour plots with different range. + p = plot_contour( + (x**2 + y**2, (x, -5, 5), (y, -5, 5)), + (x**3 + y**3, (x, -3, 3), (y, -3, 3)), + adaptive=adaptive, n=10) + filename = 'test_contour_plot.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_3(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + z = Symbol('z') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + ### + # Examples from the 'colors' notebook + ### + + p = plot(sin(x), adaptive=adaptive, n=10) + p[0].line_color = lambda a: a + filename = 'test_colors_line_arity1.png' + p.save(os.path.join(tmpdir, filename)) + + p[0].line_color = lambda a, b: b + filename = 'test_colors_line_arity2.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot(x*sin(x), x*cos(x), (x, 0, 10), adaptive=adaptive, n=10) + p[0].line_color = lambda a: a + filename = 'test_colors_param_line_arity1.png' + p.save(os.path.join(tmpdir, filename)) + + p[0].line_color = lambda a, b: a + filename = 'test_colors_param_line_arity1.png' + p.save(os.path.join(tmpdir, filename)) + + p[0].line_color = lambda a, b: b + filename = 'test_colors_param_line_arity2b.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d_parametric_line( + sin(x) + 0.1*sin(x)*cos(7*x), + cos(x) + 0.1*cos(x)*cos(7*x), + 0.1*sin(7*x), + (x, 0, 2*pi), adaptive=adaptive, n=10) + p[0].line_color = lambdify_(x, sin(4*x)) + filename = 'test_colors_3d_line_arity1.png' + p.save(os.path.join(tmpdir, filename)) + p[0].line_color = lambda a, b: b + filename = 'test_colors_3d_line_arity2.png' + p.save(os.path.join(tmpdir, filename)) + p[0].line_color = lambda a, b, c: c + filename = 'test_colors_3d_line_arity3.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5), adaptive=adaptive, n=10) + p[0].surface_color = lambda a: a + filename = 'test_colors_surface_arity1.png' + p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambda a, b: b + filename = 'test_colors_surface_arity2.png' + p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambda a, b, c: c + filename = 'test_colors_surface_arity3a.png' + 
p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambdify_((x, y, z), sqrt((x - 3*pi)**2 + y**2)) + filename = 'test_colors_surface_arity3b.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y, + (x, -1, 1), (y, -1, 1), adaptive=adaptive, n=10) + p[0].surface_color = lambda a: a + filename = 'test_colors_param_surf_arity1.png' + p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambda a, b: a*b + filename = 'test_colors_param_surf_arity2.png' + p.save(os.path.join(tmpdir, filename)) + p[0].surface_color = lambdify_((x, y, z), sqrt(x**2 + y**2 + z**2)) + filename = 'test_colors_param_surf_arity3.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True]) +def test_plot_and_save_4(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + + ### + # Examples from the 'advanced' notebook + ### + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y)) + p = plot(i, (y, 1, 5), adaptive=adaptive, n=10, force_real_eval=True) + filename = 'test_advanced_integral.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_5(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + s = Sum(1/x**y, (x, 1, oo)) + p = plot(s, (y, 2, 10), adaptive=adaptive, n=10) + filename = 'test_advanced_inf_sum.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p = plot(Sum(1/x, (x, 1, y)), (y, 2, 10), show=False, + adaptive=adaptive, n=10) + p[0].only_integers = True + p[0].steps = True + filename = 'test_advanced_fin_sum.png' + + # XXX: This should be fixed in experimental_lambdify or by using + # ordinary lambdify so that it doesn't warn. The error results from + # passing an array of values as the integration limit. + # + # UserWarning: The evaluation of the expression is problematic. We are + # trying a failback method that may still work. Please report this as a + # bug. + with ignore_warnings(UserWarning): + p.save(os.path.join(tmpdir, filename)) + + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_and_save_6(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + filename = 'test.png' + ### + # Test expressions that can not be translated to np and generate complex + # results. 
+ ### + p = plot(sin(x) + I*cos(x)) + p.save(os.path.join(tmpdir, filename)) + + with ignore_warnings(RuntimeWarning): + p = plot(sqrt(sqrt(-x))) + p.save(os.path.join(tmpdir, filename)) + + p = plot(LambertW(x)) + p.save(os.path.join(tmpdir, filename)) + p = plot(sqrt(LambertW(x))) + p.save(os.path.join(tmpdir, filename)) + + #Characteristic function of a StudentT distribution with nu=10 + x1 = 5 * x**2 * exp_polar(-I*pi)/2 + m1 = meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), x1) + x2 = 5*x**2 * exp_polar(I*pi)/2 + m2 = meijerg(((1/2,), ()), ((5, 0, 1/2), ()), x2) + expr = (m1 + m2) / (48 * pi) + with warns( + UserWarning, + match="The evaluation with NumPy/SciPy failed", + test_stacklevel=False, + ): + p = plot(expr, (x, 1e-6, 1e-2), adaptive=adaptive, n=10) + p.save(os.path.join(tmpdir, filename)) + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plotgrid_and_save(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + + with TemporaryDirectory(prefix='sympy_') as tmpdir: + p1 = plot(x, adaptive=adaptive, n=10) + p2 = plot_parametric((sin(x), cos(x)), (x, sin(x)), show=False, + adaptive=adaptive, n=10) + p3 = plot_parametric( + cos(x), sin(x), adaptive=adaptive, n=10, show=False) + p4 = plot3d_parametric_line(sin(x), cos(x), x, show=False, + adaptive=adaptive, n=10) + # symmetric grid + p = PlotGrid(2, 2, p1, p2, p3, p4) + filename = 'test_grid1.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + # grid size greater than the number of subplots + p = PlotGrid(3, 4, p1, p2, p3, p4) + filename = 'test_grid2.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + p5 = plot(cos(x),(x, -pi, pi), show=False, adaptive=adaptive, n=10) + p5[0].line_color = lambda a: a + p6 = plot(Piecewise((1, x > 0), (0, True)), (x, -1, 1), show=False, + adaptive=adaptive, n=10) + p7 = plot_contour( + (x**2 + y**2, (x, -5, 5), (y, -5, 5)), + (x**3 + y**3, (x, -3, 3), (y, -3, 3)), show=False, + adaptive=adaptive, n=10) + # unsymmetric grid (subplots in one line) + p = PlotGrid(1, 3, p5, p6, p7) + filename = 'test_grid3.png' + p.save(os.path.join(tmpdir, filename)) + p._backend.close() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_append_issue_7140(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p1 = plot(x, adaptive=adaptive, n=10) + p2 = plot(x**2, adaptive=adaptive, n=10) + plot(x + 2, adaptive=adaptive, n=10) + + # append a series + p2.append(p1[0]) + assert len(p2._series) == 2 + + with raises(TypeError): + p1.append(p2) + + with raises(TypeError): + p1.append(p2._series) + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_15265(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + eqn = sin(x) + + p = plot(eqn, xlim=(-S.Pi, S.Pi), ylim=(-1, 1), adaptive=adaptive, n=10) + p._backend.close() + + p = plot(eqn, xlim=(-1, 1), ylim=(-S.Pi, S.Pi), adaptive=adaptive, n=10) + p._backend.close() + + p = plot(eqn, xlim=(-1, 1), adaptive=adaptive, n=10, + ylim=(sympify('-3.14'), sympify('3.14'))) + p._backend.close() + + p = plot(eqn, adaptive=adaptive, n=10, + xlim=(sympify('-3.14'), sympify('3.14')), ylim=(-1, 1)) + p._backend.close() + + raises(ValueError, + lambda: plot(eqn, adaptive=adaptive, n=10, + xlim=(-S.ImaginaryUnit, 1), ylim=(-1, 1))) + + raises(ValueError, + lambda: plot(eqn, adaptive=adaptive, n=10, + xlim=(-1, 1), ylim=(-1, S.ImaginaryUnit))) + + 
raises(ValueError, + lambda: plot(eqn, adaptive=adaptive, n=10, + xlim=(S.NegativeInfinity, 1), ylim=(-1, 1))) + + raises(ValueError, + lambda: plot(eqn, adaptive=adaptive, n=10, + xlim=(-1, 1), ylim=(-1, S.Infinity))) + + +def test_empty_Plot(): + if not matplotlib: + skip("Matplotlib not the default backend") + + # No exception showing an empty plot + plot() + # Plot is only a base class: doesn't implement any logic for showing + # images + p = Plot() + raises(NotImplementedError, lambda: p.show()) + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_17405(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + f = x**0.3 - 10*x**3 + x**2 + p = plot(f, (x, -10, 10), adaptive=adaptive, n=30, show=False) + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + + # RuntimeWarning: invalid value encountered in double_scalars + with ignore_warnings(RuntimeWarning): + assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_logplot_PR_16796(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(x, (x, .001, 100), adaptive=adaptive, n=30, + xscale='log', show=False) + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + assert len(p[0].get_data()[0]) >= 30 + assert p[0].end == 100.0 + assert p[0].start == .001 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_16572(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(LambertW(x), show=False, adaptive=adaptive, n=30) + # Random number of segments, probably more than 50, but we want to see + # that there are segments generated, as opposed to when the bug was present + assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_11865(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + k = Symbol('k', integer=True) + f = Piecewise((-I*exp(I*pi*k)/k + I*exp(-I*pi*k)/k, Ne(k, 0)), (2*pi, True)) + p = plot(f, show=False, adaptive=adaptive, n=30) + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + # and that there are no exceptions. + assert len(p[0].get_data()[0]) >= 30 + + +def test_issue_11461(): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(real_root((log(x/(x-2))), 3), show=False, adaptive=True) + with warns( + RuntimeWarning, + match="invalid value encountered in", + test_stacklevel=False, + ): + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + # and that there are no exceptions. 
+ assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_11764(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot_parametric(cos(x), sin(x), (x, 0, 2 * pi), + aspect_ratio=(1,1), show=False, adaptive=adaptive, n=30) + assert p.aspect_ratio == (1, 1) + # Random number of segments, probably more than 100, but we want to see + # that there are segments generated, as opposed to when the bug was present + assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_issue_13516(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + pm = plot(sin(x), backend="matplotlib", show=False, adaptive=adaptive, n=30) + assert pm.backend == MatplotlibBackend + assert len(pm[0].get_data()[0]) >= 30 + + pt = plot(sin(x), backend="text", show=False, adaptive=adaptive, n=30) + assert pt.backend == TextBackend + assert len(pt[0].get_data()[0]) >= 30 + + pd = plot(sin(x), backend="default", show=False, adaptive=adaptive, n=30) + assert pd.backend == MatplotlibBackend + assert len(pd[0].get_data()[0]) >= 30 + + p = plot(sin(x), show=False, adaptive=adaptive, n=30) + assert p.backend == MatplotlibBackend + assert len(p[0].get_data()[0]) >= 30 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_limits(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(x, x**2, (x, -10, 10), adaptive=adaptive, n=10) + backend = p._backend + + xmin, xmax = backend.ax.get_xlim() + assert abs(xmin + 10) < 2 + assert abs(xmax - 10) < 2 + ymin, ymax = backend.ax.get_ylim() + assert abs(ymin + 10) < 10 + assert abs(ymax - 100) < 10 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot3d_parametric_line_limits(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + v1 = (2*cos(x), 2*sin(x), 2*x, (x, -5, 5)) + v2 = (sin(x), cos(x), x, (x, -5, 5)) + p = plot3d_parametric_line(v1, v2, adaptive=adaptive, n=60) + backend = p._backend + + xmin, xmax = backend.ax.get_xlim() + assert abs(xmin + 2) < 1e-2 + assert abs(xmax - 2) < 1e-2 + ymin, ymax = backend.ax.get_ylim() + assert abs(ymin + 2) < 1e-2 + assert abs(ymax - 2) < 1e-2 + zmin, zmax = backend.ax.get_zlim() + assert abs(zmin + 10) < 1e-2 + assert abs(zmax - 10) < 1e-2 + + p = plot3d_parametric_line(v2, v1, adaptive=adaptive, n=60) + backend = p._backend + + xmin, xmax = backend.ax.get_xlim() + assert abs(xmin + 2) < 1e-2 + assert abs(xmax - 2) < 1e-2 + ymin, ymax = backend.ax.get_ylim() + assert abs(ymin + 2) < 1e-2 + assert abs(ymax - 2) < 1e-2 + zmin, zmax = backend.ax.get_zlim() + assert abs(zmin + 10) < 1e-2 + assert abs(zmax - 10) < 1e-2 + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_plot_size(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + p1 = plot(sin(x), backend="matplotlib", size=(8, 4), + adaptive=adaptive, n=10) + s1 = p1._backend.fig.get_size_inches() + assert (s1[0] == 8) and (s1[1] == 4) + p2 = plot(sin(x), backend="matplotlib", size=(5, 10), + adaptive=adaptive, n=10) + s2 = p2._backend.fig.get_size_inches() + assert (s2[0] == 5) and (s2[1] == 10) + p3 = PlotGrid(2, 1, p1, p2, size=(6, 2), + adaptive=adaptive, n=10) + s3 = p3._backend.fig.get_size_inches() + assert (s3[0] == 6) and (s3[1] == 2) + + with raises(ValueError): + plot(sin(x), backend="matplotlib", 
size=(-1, 3)) + + +def test_issue_20113(): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + + # verify the capability to use custom backends + plot(sin(x), backend=Plot, show=False) + p2 = plot(sin(x), backend=MatplotlibBackend, show=False) + assert p2.backend == MatplotlibBackend + assert len(p2[0].get_data()[0]) >= 30 + p3 = plot(sin(x), backend=DummyBackendOk, show=False) + assert p3.backend == DummyBackendOk + assert len(p3[0].get_data()[0]) >= 30 + + # test for an improper coded backend + p4 = plot(sin(x), backend=DummyBackendNotOk, show=False) + assert p4.backend == DummyBackendNotOk + assert len(p4[0].get_data()[0]) >= 30 + with raises(NotImplementedError): + p4.show() + with raises(NotImplementedError): + p4.save("test/path") + with raises(NotImplementedError): + p4._backend.close() + + +def test_custom_coloring(): + x = Symbol('x') + y = Symbol('y') + plot(cos(x), line_color=lambda a: a) + plot(cos(x), line_color=1) + plot(cos(x), line_color="r") + plot_parametric(cos(x), sin(x), line_color=lambda a: a) + plot_parametric(cos(x), sin(x), line_color=1) + plot_parametric(cos(x), sin(x), line_color="r") + plot3d_parametric_line(cos(x), sin(x), x, line_color=lambda a: a) + plot3d_parametric_line(cos(x), sin(x), x, line_color=1) + plot3d_parametric_line(cos(x), sin(x), x, line_color="r") + plot3d_parametric_surface(cos(x + y), sin(x - y), x - y, + (x, -5, 5), (y, -5, 5), + surface_color=lambda a, b: a**2 + b**2) + plot3d_parametric_surface(cos(x + y), sin(x - y), x - y, + (x, -5, 5), (y, -5, 5), + surface_color=1) + plot3d_parametric_surface(cos(x + y), sin(x - y), x - y, + (x, -5, 5), (y, -5, 5), + surface_color="r") + plot3d(x*y, (x, -5, 5), (y, -5, 5), + surface_color=lambda a, b: a**2 + b**2) + plot3d(x*y, (x, -5, 5), (y, -5, 5), surface_color=1) + plot3d(x*y, (x, -5, 5), (y, -5, 5), surface_color="r") + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_deprecated_get_segments(adaptive): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + f = sin(x) + p = plot(f, (x, -10, 10), show=False, adaptive=adaptive, n=10) + with warns_deprecated_sympy(): + p[0].get_segments() + + +@pytest.mark.parametrize("adaptive", [True, False]) +def test_generic_data_series(adaptive): + # verify that no errors are raised when generic data series are used + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol("x") + p = plot(x, + markers=[{"args":[[0, 1], [0, 1]], "marker": "*", "linestyle": "none"}], + annotations=[{"text": "test", "xy": (0, 0)}], + fill={"x": [0, 1, 2, 3], "y1": [0, 1, 2, 3]}, + rectangles=[{"xy": (0, 0), "width": 5, "height": 1}], + adaptive=adaptive, n=10) + assert len(p._backend.ax.collections) == 1 + assert len(p._backend.ax.patches) == 1 + assert len(p._backend.ax.lines) == 2 + assert len(p._backend.ax.texts) == 1 + + +def test_deprecated_markers_annotations_rectangles_fill(): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + p = plot(sin(x), (x, -10, 10), show=False) + with warns_deprecated_sympy(): + p.markers = [{"args":[[0, 1], [0, 1]], "marker": "*", "linestyle": "none"}] + assert len(p._series) == 2 + with warns_deprecated_sympy(): + p.annotations = [{"text": "test", "xy": (0, 0)}] + assert len(p._series) == 3 + with warns_deprecated_sympy(): + p.fill = {"x": [0, 1, 2, 3], "y1": [0, 1, 2, 3]} + assert len(p._series) == 4 + with warns_deprecated_sympy(): + p.rectangles = [{"xy": (0, 0), "width": 5, "height": 1}] + assert 
len(p._series) == 5 + + +def test_back_compatibility(): + if not matplotlib: + skip("Matplotlib not the default backend") + + x = Symbol('x') + y = Symbol('y') + p = plot(sin(x), adaptive=False, n=5) + assert len(p[0].get_points()) == 2 + assert len(p[0].get_data()) == 2 + p = plot_parametric(cos(x), sin(x), (x, 0, 2), adaptive=False, n=5) + assert len(p[0].get_points()) == 2 + assert len(p[0].get_data()) == 3 + p = plot3d_parametric_line(cos(x), sin(x), x, (x, 0, 2), + adaptive=False, n=5) + assert len(p[0].get_points()) == 3 + assert len(p[0].get_data()) == 4 + p = plot3d(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), n=5) + assert len(p[0].get_meshes()) == 3 + assert len(p[0].get_data()) == 3 + p = plot_contour(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), n=5) + assert len(p[0].get_meshes()) == 3 + assert len(p[0].get_data()) == 3 + p = plot3d_parametric_surface(x * cos(y), x * sin(y), x * cos(4 * y) / 2, + (x, 0, pi), (y, 0, 2*pi), n=5) + assert len(p[0].get_meshes()) == 3 + assert len(p[0].get_data()) == 5 + + +def test_plot_arguments(): + ### Test arguments for plot() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single expressions + p = plot(x + 1) + assert isinstance(p[0], LineOver1DRangeSeries) + assert p[0].expr == x + 1 + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x + 1" + assert p[0].rendering_kw == {} + + # single expressions custom label + p = plot(x + 1, "label") + assert isinstance(p[0], LineOver1DRangeSeries) + assert p[0].expr == x + 1 + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "label" + assert p[0].rendering_kw == {} + + # single expressions with range + p = plot(x + 1, (x, -2, 2)) + assert p[0].ranges == [(x, -2, 2)] + + # single expressions with range, label and rendering-kw dictionary + p = plot(x + 1, (x, -2, 2), "test", {"color": "r"}) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"color": "r"} + + # multiple expressions + p = plot(x + 1, x**2) + assert isinstance(p[0], LineOver1DRangeSeries) + assert p[0].expr == x + 1 + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x + 1" + assert p[0].rendering_kw == {} + assert isinstance(p[1], LineOver1DRangeSeries) + assert p[1].expr == x**2 + assert p[1].ranges == [(x, -10, 10)] + assert p[1].get_label(False) == "x**2" + assert p[1].rendering_kw == {} + + # multiple expressions over the same range + p = plot(x + 1, x**2, (x, 0, 5)) + assert p[0].ranges == [(x, 0, 5)] + assert p[1].ranges == [(x, 0, 5)] + + # multiple expressions over the same range with the same rendering kws + p = plot(x + 1, x**2, (x, 0, 5), {"color": "r"}) + assert p[0].ranges == [(x, 0, 5)] + assert p[1].ranges == [(x, 0, 5)] + assert p[0].rendering_kw == {"color": "r"} + assert p[1].rendering_kw == {"color": "r"} + + # multiple expressions with different ranges, labels and rendering kws + p = plot( + (x + 1, (x, 0, 5)), + (x**2, (x, -2, 2), "test", {"color": "r"})) + assert isinstance(p[0], LineOver1DRangeSeries) + assert p[0].expr == x + 1 + assert p[0].ranges == [(x, 0, 5)] + assert p[0].get_label(False) == "x + 1" + assert p[0].rendering_kw == {} + assert isinstance(p[1], LineOver1DRangeSeries) + assert p[1].expr == x**2 + assert p[1].ranges == [(x, -2, 2)] + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {"color": "r"} + + # single argument: lambda function + f = lambda t: t + p = plot(lambda t: t) + assert isinstance(p[0], LineOver1DRangeSeries) + assert 
callable(p[0].expr) + assert p[0].ranges[0][1:] == (-10, 10) + assert p[0].get_label(False) == "" + assert p[0].rendering_kw == {} + + # single argument: lambda function + custom range and label + p = plot(f, ("t", -5, 6), "test") + assert p[0].ranges[0][1:] == (-5, 6) + assert p[0].get_label(False) == "test" + + +def test_plot_parametric_arguments(): + ### Test arguments for plot_parametric() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single parametric expression + p = plot_parametric(x + 1, x) + assert isinstance(p[0], Parametric2DLineSeries) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + + # single parametric expression with custom range, label and rendering kws + p = plot_parametric(x + 1, x, (x, -2, 2), "test", + {"cmap": "Reds"}) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"cmap": "Reds"} + + p = plot_parametric((x + 1, x), (x, -2, 2), "test") + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + # multiple parametric expressions same symbol + p = plot_parametric((x + 1, x), (x ** 2, x + 1)) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, x + 1) + assert p[1].ranges == [(x, -10, 10)] + assert p[1].get_label(False) == "x" + assert p[1].rendering_kw == {} + + # multiple parametric expressions different symbols + p = plot_parametric((x + 1, x), (y ** 2, y + 1, "test")) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (y ** 2, y + 1) + assert p[1].ranges == [(y, -10, 10)] + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {} + + # multiple parametric expressions same range + p = plot_parametric((x + 1, x), (x ** 2, x + 1), (x, -2, 2)) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, x + 1) + assert p[1].ranges == [(x, -2, 2)] + assert p[1].get_label(False) == "x" + assert p[1].rendering_kw == {} + + # multiple parametric expressions, custom ranges and labels + p = plot_parametric( + (x + 1, x, (x, -2, 2), "test1"), + (x ** 2, x + 1, (x, -3, 3), "test2", {"cmap": "Reds"})) + assert p[0].expr == (x + 1, x) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test1" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, x + 1) + assert p[1].ranges == [(x, -3, 3)] + assert p[1].get_label(False) == "test2" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # single argument: lambda function + fx = lambda t: t + fy = lambda t: 2 * t + p = plot_parametric(fx, fy) + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (-10, 10) + assert "Dummy" in p[0].get_label(False) + assert p[0].rendering_kw == {} + + # single argument: lambda function + custom range + label + p = plot_parametric(fx, fy, ("t", 0, 2), "test") + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (0, 2) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + +def test_plot3d_parametric_line_arguments(): + ### Test 
arguments for plot3d_parametric_line() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single parametric expression + p = plot3d_parametric_line(x + 1, x, sin(x)) + assert isinstance(p[0], Parametric3DLineSeries) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + + # single parametric expression with custom range, label and rendering kws + p = plot3d_parametric_line(x + 1, x, sin(x), (x, -2, 2), + "test", {"cmap": "Reds"}) + assert isinstance(p[0], Parametric3DLineSeries) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"cmap": "Reds"} + + p = plot3d_parametric_line((x + 1, x, sin(x)), (x, -2, 2), "test") + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -2, 2)] + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + # multiple parametric expression same symbol + p = plot3d_parametric_line( + (x + 1, x, sin(x)), (x ** 2, 1, cos(x), {"cmap": "Reds"})) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, 1, cos(x)) + assert p[1].ranges == [(x, -10, 10)] + assert p[1].get_label(False) == "x" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # multiple parametric expression different symbols + p = plot3d_parametric_line((x + 1, x, sin(x)), (y ** 2, 1, cos(y))) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (y ** 2, 1, cos(y)) + assert p[1].ranges == [(y, -10, 10)] + assert p[1].get_label(False) == "y" + assert p[1].rendering_kw == {} + + # multiple parametric expression, custom ranges and labels + p = plot3d_parametric_line( + (x + 1, x, sin(x)), + (x ** 2, 1, cos(x), (x, -2, 2), "test", {"cmap": "Reds"})) + assert p[0].expr == (x + 1, x, sin(x)) + assert p[0].ranges == [(x, -10, 10)] + assert p[0].get_label(False) == "x" + assert p[0].rendering_kw == {} + assert p[1].expr == (x ** 2, 1, cos(x)) + assert p[1].ranges == [(x, -2, 2)] + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # single argument: lambda function + fx = lambda t: t + fy = lambda t: 2 * t + fz = lambda t: 3 * t + p = plot3d_parametric_line(fx, fy, fz) + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (-10, 10) + assert "Dummy" in p[0].get_label(False) + assert p[0].rendering_kw == {} + + # single argument: lambda function + custom range + label + p = plot3d_parametric_line(fx, fy, fz, ("t", 0, 2), "test") + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (0, 2) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + +def test_plot3d_plot_contour_arguments(): + ### Test arguments for plot3d() and plot_contour() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single expression + p = plot3d(x + y) + assert isinstance(p[0], SurfaceOver2DRangeSeries) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[0].get_label(False) == "x + y" + assert p[0].rendering_kw == {} + + # single expression, custom range, label and rendering kws + 
p = plot3d(x + y, (x, -2, 2), "test", {"cmap": "Reds"}) + assert isinstance(p[0], SurfaceOver2DRangeSeries) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -10, 10) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"cmap": "Reds"} + + p = plot3d(x + y, (x, -2, 2), (y, -4, 4), "test") + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -4, 4) + + # multiple expressions + p = plot3d(x + y, x * y) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[0].get_label(False) == "x + y" + assert p[0].rendering_kw == {} + assert p[1].expr == x * y + assert p[1].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[1].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[1].get_label(False) == "x*y" + assert p[1].rendering_kw == {} + + # multiple expressions, same custom ranges + p = plot3d(x + y, x * y, (x, -2, 2), (y, -4, 4)) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -4, 4) + assert p[0].get_label(False) == "x + y" + assert p[0].rendering_kw == {} + assert p[1].expr == x * y + assert p[1].ranges[0] == (x, -2, 2) + assert p[1].ranges[1] == (y, -4, 4) + assert p[1].get_label(False) == "x*y" + assert p[1].rendering_kw == {} + + # multiple expressions, custom ranges, labels and rendering kws + p = plot3d( + (x + y, (x, -2, 2), (y, -4, 4)), + (x * y, (x, -3, 3), (y, -6, 6), "test", {"cmap": "Reds"})) + assert p[0].expr == x + y + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -4, 4) + assert p[0].get_label(False) == "x + y" + assert p[0].rendering_kw == {} + assert p[1].expr == x * y + assert p[1].ranges[0] == (x, -3, 3) + assert p[1].ranges[1] == (y, -6, 6) + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # single expression: lambda function + f = lambda x, y: x + y + p = plot3d(f) + assert callable(p[0].expr) + assert p[0].ranges[0][1:] == (-10, 10) + assert p[0].ranges[1][1:] == (-10, 10) + assert p[0].get_label(False) == "" + assert p[0].rendering_kw == {} + + # single expression: lambda function + custom ranges + label + p = plot3d(f, ("a", -5, 3), ("b", -2, 1), "test") + assert callable(p[0].expr) + assert p[0].ranges[0][1:] == (-5, 3) + assert p[0].ranges[1][1:] == (-2, 1) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + + # test issue 25818 + # single expression, custom range, min/max functions + p = plot3d(Min(x, y), (x, 0, 10), (y, 0, 10)) + assert isinstance(p[0], SurfaceOver2DRangeSeries) + assert p[0].expr == Min(x, y) + assert p[0].ranges[0] == (x, 0, 10) + assert p[0].ranges[1] == (y, 0, 10) + assert p[0].get_label(False) == "Min(x, y)" + assert p[0].rendering_kw == {} + + +def test_plot3d_parametric_surface_arguments(): + ### Test arguments for plot3d_parametric_surface() + if not matplotlib: + skip("Matplotlib not the default backend") + + x, y = symbols("x, y") + + # single parametric expression + p = plot3d_parametric_surface(x + y, cos(x + y), sin(x + y)) + assert isinstance(p[0], ParametricSurfaceSeries) + assert p[0].expr == (x + y, cos(x + y), sin(x + y)) + assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[0].get_label(False) == "(x + y, cos(x + y), sin(x + y))" + assert p[0].rendering_kw == {} + + # single parametric expression, custom ranges, labels and rendering kws + p = 
plot3d_parametric_surface(x + y, cos(x + y), sin(x + y), + (x, -2, 2), (y, -4, 4), "test", {"cmap": "Reds"}) + assert isinstance(p[0], ParametricSurfaceSeries) + assert p[0].expr == (x + y, cos(x + y), sin(x + y)) + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -4, 4) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {"cmap": "Reds"} + + # multiple parametric expressions + p = plot3d_parametric_surface( + (x + y, cos(x + y), sin(x + y)), + (x - y, cos(x - y), sin(x - y), "test")) + assert p[0].expr == (x + y, cos(x + y), sin(x + y)) + assert p[0].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[0].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[0].get_label(False) == "(x + y, cos(x + y), sin(x + y))" + assert p[0].rendering_kw == {} + assert p[1].expr == (x - y, cos(x - y), sin(x - y)) + assert p[1].ranges[0] == (x, -10, 10) or (y, -10, 10) + assert p[1].ranges[1] == (x, -10, 10) or (y, -10, 10) + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {} + + # multiple parametric expressions, custom ranges and labels + p = plot3d_parametric_surface( + (x + y, cos(x + y), sin(x + y), (x, -2, 2), "test"), + (x - y, cos(x - y), sin(x - y), (x, -3, 3), (y, -4, 4), + "test2", {"cmap": "Reds"})) + assert p[0].expr == (x + y, cos(x + y), sin(x + y)) + assert p[0].ranges[0] == (x, -2, 2) + assert p[0].ranges[1] == (y, -10, 10) + assert p[0].get_label(False) == "test" + assert p[0].rendering_kw == {} + assert p[1].expr == (x - y, cos(x - y), sin(x - y)) + assert p[1].ranges[0] == (x, -3, 3) + assert p[1].ranges[1] == (y, -4, 4) + assert p[1].get_label(False) == "test2" + assert p[1].rendering_kw == {"cmap": "Reds"} + + # lambda functions instead of symbolic expressions for a single 3D + # parametric surface + p = plot3d_parametric_surface( + lambda u, v: u, lambda u, v: v, lambda u, v: u + v, + ("u", 0, 2), ("v", -3, 4)) + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (-0, 2) + assert p[0].ranges[1][1:] == (-3, 4) + assert p[0].get_label(False) == "" + assert p[0].rendering_kw == {} + + # lambda functions instead of symbolic expressions for multiple 3D + # parametric surfaces + p = plot3d_parametric_surface( + (lambda u, v: u, lambda u, v: v, lambda u, v: u + v, + ("u", 0, 2), ("v", -3, 4)), + (lambda u, v: v, lambda u, v: u, lambda u, v: u - v, + ("u", -2, 3), ("v", -4, 5), "test")) + assert all(callable(t) for t in p[0].expr) + assert p[0].ranges[0][1:] == (0, 2) + assert p[0].ranges[1][1:] == (-3, 4) + assert p[0].get_label(False) == "" + assert p[0].rendering_kw == {} + assert all(callable(t) for t in p[1].expr) + assert p[1].ranges[0][1:] == (-2, 3) + assert p[1].ranges[1][1:] == (-4, 5) + assert p[1].get_label(False) == "test" + assert p[1].rendering_kw == {} diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png new file mode 100644 index 0000000000000000000000000000000000000000..07cac5b54f8a39774c151fc70a00552ba83fe5fc --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:115d0b9b81ed40f93fe9e216b4f6384cf71093e3bbb64a5d648b8b9858c645a0 +size 6864 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_region_not.png b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_region_not.png new file mode 
100644 index 0000000000000000000000000000000000000000..51c241e9825c28047cdd31fd64020ecb956a19e4 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_region_not.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dceffdfe73d6d78f453142c4713e51a88dbe9361f79c710b6df2400edd9c3bc9 +size 7939 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png new file mode 100644 index 0000000000000000000000000000000000000000..cafdc56f650a8c4d7af38fdfd8206891aa9d6cc2 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_region_xor.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92e71558103d03df0ea5c47876277968b5d4ca8ab8cf43b80b73cce9d962052c +size 10002 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_series.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_series.py new file mode 100644 index 0000000000000000000000000000000000000000..e23aa719153d20dc9b9e911e5cee097f0ea56211 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_series.py @@ -0,0 +1,1771 @@ +from sympy import ( + latex, exp, symbols, I, pi, sin, cos, tan, log, sqrt, + re, im, arg, frac, Sum, S, Abs, lambdify, + Function, dsolve, Eq, floor, Tuple +) +from sympy.external import import_module +from sympy.plotting.series import ( + LineOver1DRangeSeries, Parametric2DLineSeries, Parametric3DLineSeries, + SurfaceOver2DRangeSeries, ContourSeries, ParametricSurfaceSeries, + ImplicitSeries, _set_discretization_points, List2DSeries +) +from sympy.testing.pytest import raises, warns, XFAIL, skip, ignore_warnings + +np = import_module('numpy') + + +def test_adaptive(): + # verify that adaptive-related keywords produces the expected results + if not np: + skip("numpy not installed.") + + x, y = symbols("x, y") + + s1 = LineOver1DRangeSeries(sin(x), (x, -10, 10), "", adaptive=True, + depth=2) + x1, _ = s1.get_data() + s2 = LineOver1DRangeSeries(sin(x), (x, -10, 10), "", adaptive=True, + depth=5) + x2, _ = s2.get_data() + s3 = LineOver1DRangeSeries(sin(x), (x, -10, 10), "", adaptive=True) + x3, _ = s3.get_data() + assert len(x1) < len(x2) < len(x3) + + s1 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=True, depth=2) + x1, _, _, = s1.get_data() + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=True, depth=5) + x2, _, _ = s2.get_data() + s3 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=True) + x3, _, _ = s3.get_data() + assert len(x1) < len(x2) < len(x3) + + +def test_detect_poles(): + if not np: + skip("numpy not installed.") + + x, u = symbols("x, u") + + s1 = LineOver1DRangeSeries(tan(x), (x, -pi, pi), + adaptive=False, n=1000, detect_poles=False) + xx1, yy1 = s1.get_data() + s2 = LineOver1DRangeSeries(tan(x), (x, -pi, pi), + adaptive=False, n=1000, detect_poles=True, eps=0.01) + xx2, yy2 = s2.get_data() + # eps is too small: doesn't detect any poles + s3 = LineOver1DRangeSeries(tan(x), (x, -pi, pi), + adaptive=False, n=1000, detect_poles=True, eps=1e-06) + xx3, yy3 = s3.get_data() + s4 = LineOver1DRangeSeries(tan(x), (x, -pi, pi), + adaptive=False, n=1000, detect_poles="symbolic") + xx4, yy4 = s4.get_data() + + assert np.allclose(xx1, xx2) and np.allclose(xx1, xx3) and np.allclose(xx1, xx4) + assert not np.any(np.isnan(yy1)) + assert not 
np.any(np.isnan(yy3)) + assert np.any(np.isnan(yy2)) + assert np.any(np.isnan(yy4)) + assert len(s2.poles_locations) == len(s3.poles_locations) == 0 + assert len(s4.poles_locations) == 2 + assert np.allclose(np.abs(s4.poles_locations), np.pi / 2) + + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some of", + test_stacklevel=False, + ): + s1 = LineOver1DRangeSeries(frac(x), (x, -10, 10), + adaptive=False, n=1000, detect_poles=False) + s2 = LineOver1DRangeSeries(frac(x), (x, -10, 10), + adaptive=False, n=1000, detect_poles=True, eps=0.05) + s3 = LineOver1DRangeSeries(frac(x), (x, -10, 10), + adaptive=False, n=1000, detect_poles="symbolic") + xx1, yy1 = s1.get_data() + xx2, yy2 = s2.get_data() + xx3, yy3 = s3.get_data() + assert np.allclose(xx1, xx2) and np.allclose(xx1, xx3) + assert not np.any(np.isnan(yy1)) + assert np.any(np.isnan(yy2)) and np.any(np.isnan(yy2)) + assert not np.allclose(yy1, yy2, equal_nan=True) + # The poles below are actually step discontinuities. + assert len(s3.poles_locations) == 21 + + s1 = LineOver1DRangeSeries(tan(u * x), (x, -pi, pi), params={u: 1}, + adaptive=False, n=1000, detect_poles=False) + xx1, yy1 = s1.get_data() + s2 = LineOver1DRangeSeries(tan(u * x), (x, -pi, pi), params={u: 1}, + adaptive=False, n=1000, detect_poles=True, eps=0.01) + xx2, yy2 = s2.get_data() + # eps is too small: doesn't detect any poles + s3 = LineOver1DRangeSeries(tan(u * x), (x, -pi, pi), params={u: 1}, + adaptive=False, n=1000, detect_poles=True, eps=1e-06) + xx3, yy3 = s3.get_data() + s4 = LineOver1DRangeSeries(tan(u * x), (x, -pi, pi), params={u: 1}, + adaptive=False, n=1000, detect_poles="symbolic") + xx4, yy4 = s4.get_data() + + assert np.allclose(xx1, xx2) and np.allclose(xx1, xx3) and np.allclose(xx1, xx4) + assert not np.any(np.isnan(yy1)) + assert not np.any(np.isnan(yy3)) + assert np.any(np.isnan(yy2)) + assert np.any(np.isnan(yy4)) + assert len(s2.poles_locations) == len(s3.poles_locations) == 0 + assert len(s4.poles_locations) == 2 + assert np.allclose(np.abs(s4.poles_locations), np.pi / 2) + + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some of", + test_stacklevel=False, + ): + u, v = symbols("u, v", real=True) + n = S(1) / 3 + f = (u + I * v)**n + r, i = re(f), im(f) + s1 = Parametric2DLineSeries(r.subs(u, -2), i.subs(u, -2), (v, -2, 2), + adaptive=False, n=1000, detect_poles=False) + s2 = Parametric2DLineSeries(r.subs(u, -2), i.subs(u, -2), (v, -2, 2), + adaptive=False, n=1000, detect_poles=True) + with ignore_warnings(RuntimeWarning): + xx1, yy1, pp1 = s1.get_data() + assert not np.isnan(yy1).any() + xx2, yy2, pp2 = s2.get_data() + assert np.isnan(yy2).any() + + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some of", + test_stacklevel=False, + ): + f = (x * u + x * I * v)**n + r, i = re(f), im(f) + s1 = Parametric2DLineSeries(r.subs(u, -2), i.subs(u, -2), + (v, -2, 2), params={x: 1}, + adaptive=False, n1=1000, detect_poles=False) + s2 = Parametric2DLineSeries(r.subs(u, -2), i.subs(u, -2), + (v, -2, 2), params={x: 1}, + adaptive=False, n1=1000, detect_poles=True) + with ignore_warnings(RuntimeWarning): + xx1, yy1, pp1 = s1.get_data() + assert not np.isnan(yy1).any() + xx2, yy2, pp2 = s2.get_data() + assert np.isnan(yy2).any() + + +def test_number_discretization_points(): + # verify that the different ways to set the number of discretization + # points are consistent with each other. 
+ if not np: + skip("numpy not installed.") + + x, y, z = symbols("x:z") + + for pt in [LineOver1DRangeSeries, Parametric2DLineSeries, + Parametric3DLineSeries]: + kw1 = _set_discretization_points({"n": 10}, pt) + kw2 = _set_discretization_points({"n": [10, 20, 30]}, pt) + kw3 = _set_discretization_points({"n1": 10}, pt) + assert all(("n1" in kw) and kw["n1"] == 10 for kw in [kw1, kw2, kw3]) + + for pt in [SurfaceOver2DRangeSeries, ContourSeries, ParametricSurfaceSeries, + ImplicitSeries]: + kw1 = _set_discretization_points({"n": 10}, pt) + kw2 = _set_discretization_points({"n": [10, 20, 30]}, pt) + kw3 = _set_discretization_points({"n1": 10, "n2": 20}, pt) + assert kw1["n1"] == kw1["n2"] == 10 + assert all((kw["n1"] == 10) and (kw["n2"] == 20) for kw in [kw2, kw3]) + + # verify that line-related series can deal with large float number of + # discretization points + LineOver1DRangeSeries(cos(x), (x, -5, 5), adaptive=False, n=1e04).get_data() + + +def test_list2dseries(): + if not np: + skip("numpy not installed.") + + xx = np.linspace(-3, 3, 10) + yy1 = np.cos(xx) + yy2 = np.linspace(-3, 3, 20) + + # same number of elements: everything is fine + s = List2DSeries(xx, yy1) + assert not s.is_parametric + # different number of elements: error + raises(ValueError, lambda: List2DSeries(xx, yy2)) + + # no color func: returns only x, y components and s in not parametric + s = List2DSeries(xx, yy1) + xxs, yys = s.get_data() + assert np.allclose(xx, xxs) + assert np.allclose(yy1, yys) + assert not s.is_parametric + + +def test_interactive_vs_noninteractive(): + # verify that if a *Series class receives a `params` dictionary, it sets + # is_interactive=True + x, y, z, u, v = symbols("x, y, z, u, v") + + s = LineOver1DRangeSeries(cos(x), (x, -5, 5)) + assert not s.is_interactive + s = LineOver1DRangeSeries(u * cos(x), (x, -5, 5), params={u: 1}) + assert s.is_interactive + + s = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5)) + assert not s.is_interactive + s = Parametric2DLineSeries(u * cos(x), u * sin(x), (x, -5, 5), + params={u: 1}) + assert s.is_interactive + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5)) + assert not s.is_interactive + s = Parametric3DLineSeries(u * cos(x), u * sin(x), x, (x, -5, 5), + params={u: 1}) + assert s.is_interactive + + s = SurfaceOver2DRangeSeries(cos(x * y), (x, -5, 5), (y, -5, 5)) + assert not s.is_interactive + s = SurfaceOver2DRangeSeries(u * cos(x * y), (x, -5, 5), (y, -5, 5), + params={u: 1}) + assert s.is_interactive + + s = ContourSeries(cos(x * y), (x, -5, 5), (y, -5, 5)) + assert not s.is_interactive + s = ContourSeries(u * cos(x * y), (x, -5, 5), (y, -5, 5), + params={u: 1}) + assert s.is_interactive + + s = ParametricSurfaceSeries(u * cos(v), v * sin(u), u + v, + (u, -5, 5), (v, -5, 5)) + assert not s.is_interactive + s = ParametricSurfaceSeries(u * cos(v * x), v * sin(u), u + v, + (u, -5, 5), (v, -5, 5), params={x: 1}) + assert s.is_interactive + + +def test_lin_log_scale(): + # Verify that data series create the correct spacing in the data. 
+ if not np: + skip("numpy not installed.") + + x, y, z = symbols("x, y, z") + + s = LineOver1DRangeSeries(x, (x, 1, 10), adaptive=False, n=50, + xscale="linear") + xx, _ = s.get_data() + assert np.isclose(xx[1] - xx[0], xx[-1] - xx[-2]) + + s = LineOver1DRangeSeries(x, (x, 1, 10), adaptive=False, n=50, + xscale="log") + xx, _ = s.get_data() + assert not np.isclose(xx[1] - xx[0], xx[-1] - xx[-2]) + + s = Parametric2DLineSeries( + cos(x), sin(x), (x, pi / 2, 1.5 * pi), adaptive=False, n=50, + xscale="linear") + _, _, param = s.get_data() + assert np.isclose(param[1] - param[0], param[-1] - param[-2]) + + s = Parametric2DLineSeries( + cos(x), sin(x), (x, pi / 2, 1.5 * pi), adaptive=False, n=50, + xscale="log") + _, _, param = s.get_data() + assert not np.isclose(param[1] - param[0], param[-1] - param[-2]) + + s = Parametric3DLineSeries( + cos(x), sin(x), x, (x, pi / 2, 1.5 * pi), adaptive=False, n=50, + xscale="linear") + _, _, _, param = s.get_data() + assert np.isclose(param[1] - param[0], param[-1] - param[-2]) + + s = Parametric3DLineSeries( + cos(x), sin(x), x, (x, pi / 2, 1.5 * pi), adaptive=False, n=50, + xscale="log") + _, _, _, param = s.get_data() + assert not np.isclose(param[1] - param[0], param[-1] - param[-2]) + + s = SurfaceOver2DRangeSeries( + cos(x ** 2 + y ** 2), (x, 1, 5), (y, 1, 5), n=10, + xscale="linear", yscale="linear") + xx, yy, _ = s.get_data() + assert np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2]) + assert np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0]) + + s = SurfaceOver2DRangeSeries( + cos(x ** 2 + y ** 2), (x, 1, 5), (y, 1, 5), n=10, + xscale="log", yscale="log") + xx, yy, _ = s.get_data() + assert not np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2]) + assert not np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0]) + + s = ImplicitSeries( + cos(x ** 2 + y ** 2) > 0, (x, 1, 5), (y, 1, 5), + n1=10, n2=10, xscale="linear", yscale="linear", adaptive=False) + xx, yy, _, _ = s.get_data() + assert np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2]) + assert np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0]) + + s = ImplicitSeries( + cos(x ** 2 + y ** 2) > 0, (x, 1, 5), (y, 1, 5), + n=10, xscale="log", yscale="log", adaptive=False) + xx, yy, _, _ = s.get_data() + assert not np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2]) + assert not np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0]) + + +def test_rendering_kw(): + # verify that each series exposes the `rendering_kw` attribute + if not np: + skip("numpy not installed.") + + u, v, x, y, z = symbols("u, v, x:z") + + s = List2DSeries([1, 2, 3], [4, 5, 6]) + assert isinstance(s.rendering_kw, dict) + + s = LineOver1DRangeSeries(1, (x, -5, 5)) + assert isinstance(s.rendering_kw, dict) + + s = Parametric2DLineSeries(sin(x), cos(x), (x, 0, pi)) + assert isinstance(s.rendering_kw, dict) + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2 * pi)) + assert isinstance(s.rendering_kw, dict) + + s = SurfaceOver2DRangeSeries(x + y, (x, -2, 2), (y, -3, 3)) + assert isinstance(s.rendering_kw, dict) + + s = ContourSeries(x + y, (x, -2, 2), (y, -3, 3)) + assert isinstance(s.rendering_kw, dict) + + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1)) + assert isinstance(s.rendering_kw, dict) + + +def test_data_shape(): + # Verify that the series produces the correct data shape when the input + # expression is a number. 
+ if not np: + skip("numpy not installed.") + + u, x, y, z = symbols("u, x:z") + + # scalar expression: it should return a numpy ones array + s = LineOver1DRangeSeries(1, (x, -5, 5)) + xx, yy = s.get_data() + assert len(xx) == len(yy) + assert np.all(yy == 1) + + s = LineOver1DRangeSeries(1, (x, -5, 5), adaptive=False, n=10) + xx, yy = s.get_data() + assert len(xx) == len(yy) == 10 + assert np.all(yy == 1) + + s = Parametric2DLineSeries(sin(x), 1, (x, 0, pi)) + xx, yy, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(param)) + assert np.all(yy == 1) + + s = Parametric2DLineSeries(1, sin(x), (x, 0, pi)) + xx, yy, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(param)) + assert np.all(xx == 1) + + s = Parametric2DLineSeries(sin(x), 1, (x, 0, pi), adaptive=False) + xx, yy, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(param)) + assert np.all(yy == 1) + + s = Parametric2DLineSeries(1, sin(x), (x, 0, pi), adaptive=False) + xx, yy, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(param)) + assert np.all(xx == 1) + + s = Parametric3DLineSeries(cos(x), sin(x), 1, (x, 0, 2 * pi)) + xx, yy, zz, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(zz)) and (len(xx) == len(param)) + assert np.all(zz == 1) + + s = Parametric3DLineSeries(cos(x), 1, x, (x, 0, 2 * pi)) + xx, yy, zz, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(zz)) and (len(xx) == len(param)) + assert np.all(yy == 1) + + s = Parametric3DLineSeries(1, sin(x), x, (x, 0, 2 * pi)) + xx, yy, zz, param = s.get_data() + assert (len(xx) == len(yy)) and (len(xx) == len(zz)) and (len(xx) == len(param)) + assert np.all(xx == 1) + + s = SurfaceOver2DRangeSeries(1, (x, -2, 2), (y, -3, 3)) + xx, yy, zz = s.get_data() + assert (xx.shape == yy.shape) and (xx.shape == zz.shape) + assert np.all(zz == 1) + + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1)) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape + assert np.all(xx == 1) + + s = ParametricSurfaceSeries(1, 1, y, (x, 0, 1), (y, 0, 1)) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape + assert np.all(yy == 1) + + s = ParametricSurfaceSeries(x, 1, 1, (x, 0, 1), (y, 0, 1)) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape + assert np.all(zz == 1) + + +def test_only_integers(): + if not np: + skip("numpy not installed.") + + x, y, u, v = symbols("x, y, u, v") + + s = LineOver1DRangeSeries(sin(x), (x, -5.5, 4.5), "", + adaptive=False, only_integers=True) + xx, _ = s.get_data() + assert len(xx) == 10 + assert xx[0] == -5 and xx[-1] == 4 + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2 * pi), "", + adaptive=False, only_integers=True) + _, _, p = s.get_data() + assert len(p) == 7 + assert p[0] == 0 and p[-1] == 6 + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2 * pi), "", + adaptive=False, only_integers=True) + _, _, _, p = s.get_data() + assert len(p) == 7 + assert p[0] == 0 and p[-1] == 6 + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -5.5, 5.5), + (y, -3.5, 3.5), "", + adaptive=False, only_integers=True) + xx, yy, _ = s.get_data() + assert xx.shape == yy.shape == (7, 11) + assert np.allclose(xx[:, 0] - (-5) * np.ones(7), 0) + assert np.allclose(xx[0, :] - np.linspace(-5, 5, 11), 0) + assert np.allclose(yy[:, 0] - np.linspace(-3, 3, 7), 0) + assert np.allclose(yy[0, :] - (-3) * 
np.ones(11), 0) + + r = 2 + sin(7 * u + 5 * v) + expr = ( + r * cos(u) * sin(v), + r * sin(u) * sin(v), + r * cos(v) + ) + s = ParametricSurfaceSeries(*expr, (u, 0, 2 * pi), (v, 0, pi), "", + adaptive=False, only_integers=True) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape == (4, 7) + + # only_integers also works with scalar expressions + s = LineOver1DRangeSeries(1, (x, -5.5, 4.5), "", + adaptive=False, only_integers=True) + xx, _ = s.get_data() + assert len(xx) == 10 + assert xx[0] == -5 and xx[-1] == 4 + + s = Parametric2DLineSeries(cos(x), 1, (x, 0, 2 * pi), "", + adaptive=False, only_integers=True) + _, _, p = s.get_data() + assert len(p) == 7 + assert p[0] == 0 and p[-1] == 6 + + s = SurfaceOver2DRangeSeries(1, (x, -5.5, 5.5), (y, -3.5, 3.5), "", + adaptive=False, only_integers=True) + xx, yy, _ = s.get_data() + assert xx.shape == yy.shape == (7, 11) + assert np.allclose(xx[:, 0] - (-5) * np.ones(7), 0) + assert np.allclose(xx[0, :] - np.linspace(-5, 5, 11), 0) + assert np.allclose(yy[:, 0] - np.linspace(-3, 3, 7), 0) + assert np.allclose(yy[0, :] - (-3) * np.ones(11), 0) + + r = 2 + sin(7 * u + 5 * v) + expr = ( + r * cos(u) * sin(v), + 1, + r * cos(v) + ) + s = ParametricSurfaceSeries(*expr, (u, 0, 2 * pi), (v, 0, pi), "", + adaptive=False, only_integers=True) + xx, yy, zz, uu, vv = s.get_data() + assert xx.shape == yy.shape == zz.shape == uu.shape == vv.shape == (4, 7) + + +def test_is_point_is_filled(): + # verify that `is_point` and `is_filled` are attributes and that they + # they receive the correct values + if not np: + skip("numpy not installed.") + + x, u = symbols("x, u") + + s = LineOver1DRangeSeries(cos(x), (x, -5, 5), "", + is_point=False, is_filled=True) + assert (not s.is_point) and s.is_filled + s = LineOver1DRangeSeries(cos(x), (x, -5, 5), "", + is_point=True, is_filled=False) + assert s.is_point and (not s.is_filled) + + s = List2DSeries([0, 1, 2], [3, 4, 5], + is_point=False, is_filled=True) + assert (not s.is_point) and s.is_filled + s = List2DSeries([0, 1, 2], [3, 4, 5], + is_point=True, is_filled=False) + assert s.is_point and (not s.is_filled) + + s = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + is_point=False, is_filled=True) + assert (not s.is_point) and s.is_filled + s = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + is_point=True, is_filled=False) + assert s.is_point and (not s.is_filled) + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + is_point=False, is_filled=True) + assert (not s.is_point) and s.is_filled + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + is_point=True, is_filled=False) + assert s.is_point and (not s.is_filled) + + +def test_is_filled_2d(): + # verify that the is_filled attribute is exposed by the following series + x, y = symbols("x, y") + + expr = cos(x**2 + y**2) + ranges = (x, -2, 2), (y, -2, 2) + + s = ContourSeries(expr, *ranges) + assert s.is_filled + s = ContourSeries(expr, *ranges, is_filled=True) + assert s.is_filled + s = ContourSeries(expr, *ranges, is_filled=False) + assert not s.is_filled + + +def test_steps(): + if not np: + skip("numpy not installed.") + + x, u = symbols("x, u") + + def do_test(s1, s2): + if (not s1.is_parametric) and s1.is_2Dline: + xx1, _ = s1.get_data() + xx2, _ = s2.get_data() + elif s1.is_parametric and s1.is_2Dline: + xx1, _, _ = s1.get_data() + xx2, _, _ = s2.get_data() + elif (not s1.is_parametric) and s1.is_3Dline: + xx1, _, _ = s1.get_data() + xx2, _, _ = s2.get_data() + else: + xx1, _, _, _ = 
s1.get_data() + xx2, _, _, _ = s2.get_data() + assert len(xx1) != len(xx2) + + s1 = LineOver1DRangeSeries(cos(x), (x, -5, 5), "", + adaptive=False, n=40, steps=False) + s2 = LineOver1DRangeSeries(cos(x), (x, -5, 5), "", + adaptive=False, n=40, steps=True) + do_test(s1, s2) + + s1 = List2DSeries([0, 1, 2], [3, 4, 5], steps=False) + s2 = List2DSeries([0, 1, 2], [3, 4, 5], steps=True) + do_test(s1, s2) + + s1 = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + adaptive=False, n=40, steps=False) + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + adaptive=False, n=40, steps=True) + do_test(s1, s2) + + s1 = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + adaptive=False, n=40, steps=False) + s2 = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + adaptive=False, n=40, steps=True) + do_test(s1, s2) + + +def test_interactive_data(): + # verify that InteractiveSeries produces the same numerical data as their + # corresponding non-interactive series. + if not np: + skip("numpy not installed.") + + u, x, y, z = symbols("u, x:z") + + def do_test(data1, data2): + assert len(data1) == len(data2) + for d1, d2 in zip(data1, data2): + assert np.allclose(d1, d2) + + s1 = LineOver1DRangeSeries(u * cos(x), (x, -5, 5), params={u: 1}, n=50) + s2 = LineOver1DRangeSeries(cos(x), (x, -5, 5), adaptive=False, n=50) + do_test(s1.get_data(), s2.get_data()) + + s1 = Parametric2DLineSeries( + u * cos(x), u * sin(x), (x, -5, 5), params={u: 1}, n=50) + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), + adaptive=False, n=50) + do_test(s1.get_data(), s2.get_data()) + + s1 = Parametric3DLineSeries( + u * cos(x), u * sin(x), u * x, (x, -5, 5), + params={u: 1}, n=50) + s2 = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), + adaptive=False, n=50) + do_test(s1.get_data(), s2.get_data()) + + s1 = SurfaceOver2DRangeSeries( + u * cos(x ** 2 + y ** 2), (x, -3, 3), (y, -3, 3), + params={u: 1}, n1=50, n2=50,) + s2 = SurfaceOver2DRangeSeries( + cos(x ** 2 + y ** 2), (x, -3, 3), (y, -3, 3), + adaptive=False, n1=50, n2=50) + do_test(s1.get_data(), s2.get_data()) + + s1 = ParametricSurfaceSeries( + u * cos(x + y), sin(x + y), x - y, (x, -3, 3), (y, -3, 3), + params={u: 1}, n1=50, n2=50,) + s2 = ParametricSurfaceSeries( + cos(x + y), sin(x + y), x - y, (x, -3, 3), (y, -3, 3), + adaptive=False, n1=50, n2=50,) + do_test(s1.get_data(), s2.get_data()) + + # real part of a complex function evaluated over a real line with numpy + expr = re((z ** 2 + 1) / (z ** 2 - 1)) + s1 = LineOver1DRangeSeries(u * expr, (z, -3, 3), adaptive=False, n=50, + modules=None, params={u: 1}) + s2 = LineOver1DRangeSeries(expr, (z, -3, 3), adaptive=False, n=50, + modules=None) + do_test(s1.get_data(), s2.get_data()) + + # real part of a complex function evaluated over a real line with mpmath + expr = re((z ** 2 + 1) / (z ** 2 - 1)) + s1 = LineOver1DRangeSeries(u * expr, (z, -3, 3), n=50, modules="mpmath", + params={u: 1}) + s2 = LineOver1DRangeSeries(expr, (z, -3, 3), + adaptive=False, n=50, modules="mpmath") + do_test(s1.get_data(), s2.get_data()) + + +def test_list2dseries_interactive(): + if not np: + skip("numpy not installed.") + + x, y, u = symbols("x, y, u") + + s = List2DSeries([1, 2, 3], [1, 2, 3]) + assert not s.is_interactive + + # symbolic expressions as coordinates, but no ``params`` + raises(ValueError, lambda: List2DSeries([cos(x)], [sin(x)])) + + # too few parameters + raises(ValueError, + lambda: List2DSeries([cos(x), y], [sin(x), 2], params={u: 1})) + + s = List2DSeries([cos(x)], [sin(x)], params={x: 1}) + assert 
s.is_interactive + + s = List2DSeries([x, 2, 3, 4], [4, 3, 2, x], params={x: 3}) + xx, yy = s.get_data() + assert np.allclose(xx, [3, 2, 3, 4]) + assert np.allclose(yy, [4, 3, 2, 3]) + assert not s.is_parametric + + # numeric lists + params is present -> interactive series and + # lists are converted to Tuple. + s = List2DSeries([1, 2, 3], [1, 2, 3], params={x: 1}) + assert s.is_interactive + assert isinstance(s.list_x, Tuple) + assert isinstance(s.list_y, Tuple) + + +def test_mpmath(): + # test that the argument of complex functions evaluated with mpmath + # might be different than the one computed with Numpy (different + # behaviour at branch cuts) + if not np: + skip("numpy not installed.") + + z, u = symbols("z, u") + + s1 = LineOver1DRangeSeries(im(sqrt(-z)), (z, 1e-03, 5), + adaptive=True, modules=None, force_real_eval=True) + s2 = LineOver1DRangeSeries(im(sqrt(-z)), (z, 1e-03, 5), + adaptive=True, modules="mpmath", force_real_eval=True) + xx1, yy1 = s1.get_data() + xx2, yy2 = s2.get_data() + assert np.all(yy1 < 0) + assert np.all(yy2 > 0) + + s1 = LineOver1DRangeSeries(im(sqrt(-z)), (z, -5, 5), + adaptive=False, n=20, modules=None, force_real_eval=True) + s2 = LineOver1DRangeSeries(im(sqrt(-z)), (z, -5, 5), + adaptive=False, n=20, modules="mpmath", force_real_eval=True) + xx1, yy1 = s1.get_data() + xx2, yy2 = s2.get_data() + assert np.allclose(xx1, xx2) + assert not np.allclose(yy1, yy2) + + +def test_str(): + u, x, y, z = symbols("u, x:z") + + s = LineOver1DRangeSeries(cos(x), (x, -4, 3)) + assert str(s) == "cartesian line: cos(x) for x over (-4.0, 3.0)" + + d = {"return": "real"} + s = LineOver1DRangeSeries(cos(x), (x, -4, 3), **d) + assert str(s) == "cartesian line: re(cos(x)) for x over (-4.0, 3.0)" + + d = {"return": "imag"} + s = LineOver1DRangeSeries(cos(x), (x, -4, 3), **d) + assert str(s) == "cartesian line: im(cos(x)) for x over (-4.0, 3.0)" + + d = {"return": "abs"} + s = LineOver1DRangeSeries(cos(x), (x, -4, 3), **d) + assert str(s) == "cartesian line: abs(cos(x)) for x over (-4.0, 3.0)" + + d = {"return": "arg"} + s = LineOver1DRangeSeries(cos(x), (x, -4, 3), **d) + assert str(s) == "cartesian line: arg(cos(x)) for x over (-4.0, 3.0)" + + s = LineOver1DRangeSeries(cos(u * x), (x, -4, 3), params={u: 1}) + assert str(s) == "interactive cartesian line: cos(u*x) for x over (-4.0, 3.0) and parameters (u,)" + + s = LineOver1DRangeSeries(cos(u * x), (x, -u, 3*y), params={u: 1, y: 1}) + assert str(s) == "interactive cartesian line: cos(u*x) for x over (-u, 3*y) and parameters (u, y)" + + s = Parametric2DLineSeries(cos(x), sin(x), (x, -4, 3)) + assert str(s) == "parametric cartesian line: (cos(x), sin(x)) for x over (-4.0, 3.0)" + + s = Parametric2DLineSeries(cos(u * x), sin(x), (x, -4, 3), params={u: 1}) + assert str(s) == "interactive parametric cartesian line: (cos(u*x), sin(x)) for x over (-4.0, 3.0) and parameters (u,)" + + s = Parametric2DLineSeries(cos(u * x), sin(x), (x, -u, 3*y), params={u: 1, y:1}) + assert str(s) == "interactive parametric cartesian line: (cos(u*x), sin(x)) for x over (-u, 3*y) and parameters (u, y)" + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -4, 3)) + assert str(s) == "3D parametric cartesian line: (cos(x), sin(x), x) for x over (-4.0, 3.0)" + + s = Parametric3DLineSeries(cos(u*x), sin(x), x, (x, -4, 3), params={u: 1}) + assert str(s) == "interactive 3D parametric cartesian line: (cos(u*x), sin(x), x) for x over (-4.0, 3.0) and parameters (u,)" + + s = Parametric3DLineSeries(cos(u*x), sin(x), x, (x, -u, 3*y), params={u: 1, y: 1}) + assert 
str(s) == "interactive 3D parametric cartesian line: (cos(u*x), sin(x), x) for x over (-u, 3*y) and parameters (u, y)" + + s = SurfaceOver2DRangeSeries(cos(x * y), (x, -4, 3), (y, -2, 5)) + assert str(s) == "cartesian surface: cos(x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0)" + + s = SurfaceOver2DRangeSeries(cos(u * x * y), (x, -4, 3), (y, -2, 5), params={u: 1}) + assert str(s) == "interactive cartesian surface: cos(u*x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0) and parameters (u,)" + + s = SurfaceOver2DRangeSeries(cos(u * x * y), (x, -4*u, 3), (y, -2, 5*u), params={u: 1}) + assert str(s) == "interactive cartesian surface: cos(u*x*y) for x over (-4*u, 3.0) and y over (-2.0, 5*u) and parameters (u,)" + + s = ContourSeries(cos(x * y), (x, -4, 3), (y, -2, 5)) + assert str(s) == "contour: cos(x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0)" + + s = ContourSeries(cos(u * x * y), (x, -4, 3), (y, -2, 5), params={u: 1}) + assert str(s) == "interactive contour: cos(u*x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0) and parameters (u,)" + + s = ParametricSurfaceSeries(cos(x * y), sin(x * y), x * y, + (x, -4, 3), (y, -2, 5)) + assert str(s) == "parametric cartesian surface: (cos(x*y), sin(x*y), x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0)" + + s = ParametricSurfaceSeries(cos(u * x * y), sin(x * y), x * y, + (x, -4, 3), (y, -2, 5), params={u: 1}) + assert str(s) == "interactive parametric cartesian surface: (cos(u*x*y), sin(x*y), x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0) and parameters (u,)" + + s = ImplicitSeries(x < y, (x, -5, 4), (y, -3, 2)) + assert str(s) == "Implicit expression: x < y for x over (-5.0, 4.0) and y over (-3.0, 2.0)" + + +def test_use_cm(): + # verify that the `use_cm` attribute is implemented. + if not np: + skip("numpy not installed.") + + u, x, y, z = symbols("u, x:z") + + s = List2DSeries([1, 2, 3, 4], [5, 6, 7, 8], use_cm=True) + assert s.use_cm + s = List2DSeries([1, 2, 3, 4], [5, 6, 7, 8], use_cm=False) + assert not s.use_cm + + s = Parametric2DLineSeries(cos(x), sin(x), (x, -4, 3), use_cm=True) + assert s.use_cm + s = Parametric2DLineSeries(cos(x), sin(x), (x, -4, 3), use_cm=False) + assert not s.use_cm + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -4, 3), + use_cm=True) + assert s.use_cm + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -4, 3), + use_cm=False) + assert not s.use_cm + + s = SurfaceOver2DRangeSeries(cos(x * y), (x, -4, 3), (y, -2, 5), + use_cm=True) + assert s.use_cm + s = SurfaceOver2DRangeSeries(cos(x * y), (x, -4, 3), (y, -2, 5), + use_cm=False) + assert not s.use_cm + + s = ParametricSurfaceSeries(cos(x * y), sin(x * y), x * y, + (x, -4, 3), (y, -2, 5), use_cm=True) + assert s.use_cm + s = ParametricSurfaceSeries(cos(x * y), sin(x * y), x * y, + (x, -4, 3), (y, -2, 5), use_cm=False) + assert not s.use_cm + + +def test_surface_use_cm(): + # verify that SurfaceOver2DRangeSeries and ParametricSurfaceSeries get + # the same value for use_cm + + x, y, u, v = symbols("x, y, u, v") + + # they read the same value from default settings + s1 = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2)) + s2 = ParametricSurfaceSeries(u * cos(v), u * sin(v), u, + (u, 0, 1), (v, 0 , 2*pi)) + assert s1.use_cm == s2.use_cm + + # they get the same value + s1 = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + use_cm=False) + s2 = ParametricSurfaceSeries(u * cos(v), u * sin(v), u, + (u, 0, 1), (v, 0 , 2*pi), use_cm=False) + assert s1.use_cm == s2.use_cm + + # they get the same value + s1 = 
SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + use_cm=True) + s2 = ParametricSurfaceSeries(u * cos(v), u * sin(v), u, + (u, 0, 1), (v, 0 , 2*pi), use_cm=True) + assert s1.use_cm == s2.use_cm + + +def test_sums(): + # test that data series are able to deal with sums + if not np: + skip("numpy not installed.") + + x, y, u = symbols("x, y, u") + + def do_test(data1, data2): + assert len(data1) == len(data2) + for d1, d2 in zip(data1, data2): + assert np.allclose(d1, d2) + + s = LineOver1DRangeSeries(Sum(1 / x ** y, (x, 1, 1000)), (y, 2, 10), + adaptive=False, only_integers=True) + xx, yy = s.get_data() + + s1 = LineOver1DRangeSeries(Sum(1 / x, (x, 1, y)), (y, 2, 10), + adaptive=False, only_integers=True) + xx1, yy1 = s1.get_data() + + s2 = LineOver1DRangeSeries(Sum(u / x, (x, 1, y)), (y, 2, 10), + params={u: 1}, only_integers=True) + xx2, yy2 = s2.get_data() + xx1 = xx1.astype(float) + xx2 = xx2.astype(float) + do_test([xx1, yy1], [xx2, yy2]) + + s = LineOver1DRangeSeries(Sum(1 / x, (x, 1, y)), (y, 2, 10), + adaptive=True) + with warns( + UserWarning, + match="The evaluation with NumPy/SciPy failed", + test_stacklevel=False, + ): + raises(TypeError, lambda: s.get_data()) + + +def test_apply_transforms(): + # verify that transformation functions get applied to the output + # of data series + if not np: + skip("numpy not installed.") + + x, y, z, u, v = symbols("x:z, u, v") + + s1 = LineOver1DRangeSeries(cos(x), (x, -2*pi, 2*pi), adaptive=False, n=10) + s2 = LineOver1DRangeSeries(cos(x), (x, -2*pi, 2*pi), adaptive=False, n=10, + tx=np.rad2deg) + s3 = LineOver1DRangeSeries(cos(x), (x, -2*pi, 2*pi), adaptive=False, n=10, + ty=np.rad2deg) + s4 = LineOver1DRangeSeries(cos(x), (x, -2*pi, 2*pi), adaptive=False, n=10, + tx=np.rad2deg, ty=np.rad2deg) + + x1, y1 = s1.get_data() + x2, y2 = s2.get_data() + x3, y3 = s3.get_data() + x4, y4 = s4.get_data() + assert np.isclose(x1[0], -2*np.pi) and np.isclose(x1[-1], 2*np.pi) + assert (y1.min() < -0.9) and (y1.max() > 0.9) + assert np.isclose(x2[0], -360) and np.isclose(x2[-1], 360) + assert (y2.min() < -0.9) and (y2.max() > 0.9) + assert np.isclose(x3[0], -2*np.pi) and np.isclose(x3[-1], 2*np.pi) + assert (y3.min() < -52) and (y3.max() > 52) + assert np.isclose(x4[0], -360) and np.isclose(x4[-1], 360) + assert (y4.min() < -52) and (y4.max() > 52) + + xx = np.linspace(-2*np.pi, 2*np.pi, 10) + yy = np.cos(xx) + s1 = List2DSeries(xx, yy) + s2 = List2DSeries(xx, yy, tx=np.rad2deg, ty=np.rad2deg) + x1, y1 = s1.get_data() + x2, y2 = s2.get_data() + assert np.isclose(x1[0], -2*np.pi) and np.isclose(x1[-1], 2*np.pi) + assert (y1.min() < -0.9) and (y1.max() > 0.9) + assert np.isclose(x2[0], -360) and np.isclose(x2[-1], 360) + assert (y2.min() < -52) and (y2.max() > 52) + + s1 = Parametric2DLineSeries( + sin(x), cos(x), (x, -pi, pi), adaptive=False, n=10) + s2 = Parametric2DLineSeries( + sin(x), cos(x), (x, -pi, pi), adaptive=False, n=10, + tx=np.rad2deg, ty=np.rad2deg, tp=np.rad2deg) + x1, y1, a1 = s1.get_data() + x2, y2, a2 = s2.get_data() + assert np.allclose(x1, np.deg2rad(x2)) + assert np.allclose(y1, np.deg2rad(y2)) + assert np.allclose(a1, np.deg2rad(a2)) + + s1 = Parametric3DLineSeries( + sin(x), cos(x), x, (x, -pi, pi), adaptive=False, n=10) + s2 = Parametric3DLineSeries( + sin(x), cos(x), x, (x, -pi, pi), adaptive=False, n=10, tp=np.rad2deg) + x1, y1, z1, a1 = s1.get_data() + x2, y2, z2, a2 = s2.get_data() + assert np.allclose(x1, x2) + assert np.allclose(y1, y2) + assert np.allclose(z1, z2) + assert np.allclose(a1, np.deg2rad(a2)) + + 
s1 = SurfaceOver2DRangeSeries( + cos(x**2 + y**2), (x, -2*pi, 2*pi), (y, -2*pi, 2*pi), + adaptive=False, n1=10, n2=10) + s2 = SurfaceOver2DRangeSeries( + cos(x**2 + y**2), (x, -2*pi, 2*pi), (y, -2*pi, 2*pi), + adaptive=False, n1=10, n2=10, + tx=np.rad2deg, ty=lambda x: 2*x, tz=lambda x: 3*x) + x1, y1, z1 = s1.get_data() + x2, y2, z2 = s2.get_data() + assert np.allclose(x1, np.deg2rad(x2)) + assert np.allclose(y1, y2 / 2) + assert np.allclose(z1, z2 / 3) + + s1 = ParametricSurfaceSeries( + u + v, u - v, u * v, (u, 0, 2*pi), (v, 0, pi), + adaptive=False, n1=10, n2=10) + s2 = ParametricSurfaceSeries( + u + v, u - v, u * v, (u, 0, 2*pi), (v, 0, pi), + adaptive=False, n1=10, n2=10, + tx=np.rad2deg, ty=lambda x: 2*x, tz=lambda x: 3*x) + x1, y1, z1, u1, v1 = s1.get_data() + x2, y2, z2, u2, v2 = s2.get_data() + assert np.allclose(x1, np.deg2rad(x2)) + assert np.allclose(y1, y2 / 2) + assert np.allclose(z1, z2 / 3) + assert np.allclose(u1, u2) + assert np.allclose(v1, v2) + + +def test_series_labels(): + # verify that series return the correct label, depending on the plot + # type and input arguments. If the user set custom label on a data series, + # it should returned un-modified. + if not np: + skip("numpy not installed.") + + x, y, z, u, v = symbols("x, y, z, u, v") + wrapper = "$%s$" + + expr = cos(x) + s1 = LineOver1DRangeSeries(expr, (x, -2, 2), None) + s2 = LineOver1DRangeSeries(expr, (x, -2, 2), "test") + assert s1.get_label(False) == str(expr) + assert s1.get_label(True) == wrapper % latex(expr) + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + + s1 = List2DSeries([0, 1, 2, 3], [0, 1, 2, 3], "test") + assert s1.get_label(False) == "test" + assert s1.get_label(True) == "test" + + expr = (cos(x), sin(x)) + s1 = Parametric2DLineSeries(*expr, (x, -2, 2), None, use_cm=True) + s2 = Parametric2DLineSeries(*expr, (x, -2, 2), "test", use_cm=True) + s3 = Parametric2DLineSeries(*expr, (x, -2, 2), None, use_cm=False) + s4 = Parametric2DLineSeries(*expr, (x, -2, 2), "test", use_cm=False) + assert s1.get_label(False) == "x" + assert s1.get_label(True) == wrapper % "x" + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + assert s3.get_label(False) == str(expr) + assert s3.get_label(True) == wrapper % latex(expr) + assert s4.get_label(False) == "test" + assert s4.get_label(True) == "test" + + expr = (cos(x), sin(x), x) + s1 = Parametric3DLineSeries(*expr, (x, -2, 2), None, use_cm=True) + s2 = Parametric3DLineSeries(*expr, (x, -2, 2), "test", use_cm=True) + s3 = Parametric3DLineSeries(*expr, (x, -2, 2), None, use_cm=False) + s4 = Parametric3DLineSeries(*expr, (x, -2, 2), "test", use_cm=False) + assert s1.get_label(False) == "x" + assert s1.get_label(True) == wrapper % "x" + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + assert s3.get_label(False) == str(expr) + assert s3.get_label(True) == wrapper % latex(expr) + assert s4.get_label(False) == "test" + assert s4.get_label(True) == "test" + + expr = cos(x**2 + y**2) + s1 = SurfaceOver2DRangeSeries(expr, (x, -2, 2), (y, -2, 2), None) + s2 = SurfaceOver2DRangeSeries(expr, (x, -2, 2), (y, -2, 2), "test") + assert s1.get_label(False) == str(expr) + assert s1.get_label(True) == wrapper % latex(expr) + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + + expr = (cos(x - y), sin(x + y), x - y) + s1 = ParametricSurfaceSeries(*expr, (x, -2, 2), (y, -2, 2), None) + s2 = ParametricSurfaceSeries(*expr, (x, -2, 2), (y, -2, 2), "test") + assert 
s1.get_label(False) == str(expr) + assert s1.get_label(True) == wrapper % latex(expr) + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + + expr = Eq(cos(x - y), 0) + s1 = ImplicitSeries(expr, (x, -10, 10), (y, -10, 10), None) + s2 = ImplicitSeries(expr, (x, -10, 10), (y, -10, 10), "test") + assert s1.get_label(False) == str(expr) + assert s1.get_label(True) == wrapper % latex(expr) + assert s2.get_label(False) == "test" + assert s2.get_label(True) == "test" + + +def test_is_polar_2d_parametric(): + # verify that Parametric2DLineSeries isable to apply polar discretization, + # which is used when polar_plot is executed with polar_axis=True + if not np: + skip("numpy not installed.") + + t, u = symbols("t u") + + # NOTE: a sufficiently big n must be provided, or else tests + # are going to fail + # No colormap + f = sin(4 * t) + s1 = Parametric2DLineSeries(f * cos(t), f * sin(t), (t, 0, 2*pi), + adaptive=False, n=10, is_polar=False, use_cm=False) + x1, y1, p1 = s1.get_data() + s2 = Parametric2DLineSeries(f * cos(t), f * sin(t), (t, 0, 2*pi), + adaptive=False, n=10, is_polar=True, use_cm=False) + th, r, p2 = s2.get_data() + assert (not np.allclose(x1, th)) and (not np.allclose(y1, r)) + assert np.allclose(p1, p2) + + # With colormap + s3 = Parametric2DLineSeries(f * cos(t), f * sin(t), (t, 0, 2*pi), + adaptive=False, n=10, is_polar=False, color_func=lambda t: 2*t) + x3, y3, p3 = s3.get_data() + s4 = Parametric2DLineSeries(f * cos(t), f * sin(t), (t, 0, 2*pi), + adaptive=False, n=10, is_polar=True, color_func=lambda t: 2*t) + th4, r4, p4 = s4.get_data() + assert np.allclose(p3, p4) and (not np.allclose(p1, p3)) + assert np.allclose(x3, x1) and np.allclose(y3, y1) + assert np.allclose(th4, th) and np.allclose(r4, r) + + +def test_is_polar_3d(): + # verify that SurfaceOver2DRangeSeries is able to apply + # polar discretization + if not np: + skip("numpy not installed.") + + x, y, t = symbols("x, y, t") + expr = (x**2 - 1)**2 + s1 = SurfaceOver2DRangeSeries(expr, (x, 0, 1.5), (y, 0, 2 * pi), + n=10, adaptive=False, is_polar=False) + s2 = SurfaceOver2DRangeSeries(expr, (x, 0, 1.5), (y, 0, 2 * pi), + n=10, adaptive=False, is_polar=True) + x1, y1, z1 = s1.get_data() + x2, y2, z2 = s2.get_data() + x22, y22 = x1 * np.cos(y1), x1 * np.sin(y1) + assert np.allclose(x2, x22) + assert np.allclose(y2, y22) + + +def test_color_func(): + # verify that eval_color_func produces the expected results in order to + # maintain back compatibility with the old sympy.plotting module + if not np: + skip("numpy not installed.") + + x, y, z, u, v = symbols("x, y, z, u, v") + + # color func: returns x, y, color and s is parametric + xx = np.linspace(-3, 3, 10) + yy1 = np.cos(xx) + s = List2DSeries(xx, yy1, color_func=lambda x, y: 2 * x, use_cm=True) + xxs, yys, col = s.get_data() + assert np.allclose(xx, xxs) + assert np.allclose(yy1, yys) + assert np.allclose(2 * xx, col) + assert s.is_parametric + + s = List2DSeries(xx, yy1, color_func=lambda x, y: 2 * x, use_cm=False) + assert len(s.get_data()) == 2 + assert not s.is_parametric + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda t: t) + xx, yy, col = s.get_data() + assert (not np.allclose(xx, col)) and (not np.allclose(yy, col)) + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda x, y: x * y) + xx, yy, col = s.get_data() + assert np.allclose(col, xx * yy) + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, 
color_func=lambda x, y, t: x * y * t) + xx, yy, col = s.get_data() + assert np.allclose(col, xx * yy * np.linspace(0, 2*np.pi, 10)) + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda t: t) + xx, yy, zz, col = s.get_data() + assert (not np.allclose(xx, col)) and (not np.allclose(yy, col)) + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda x, y, z: x * y * z) + xx, yy, zz, col = s.get_data() + assert np.allclose(col, xx * yy * zz) + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda x, y, z, t: x * y * z * t) + xx, yy, zz, col = s.get_data() + assert np.allclose(col, xx * yy * zz * np.linspace(0, 2*np.pi, 10)) + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + adaptive=False, n1=10, n2=10, color_func=lambda x: x) + xx, yy, zz = s.get_data() + col = s.eval_color_func(xx, yy, zz) + assert np.allclose(xx, col) + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + adaptive=False, n1=10, n2=10, color_func=lambda x, y: x * y) + xx, yy, zz = s.get_data() + col = s.eval_color_func(xx, yy, zz) + assert np.allclose(xx * yy, col) + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + adaptive=False, n1=10, n2=10, color_func=lambda x, y, z: x * y * z) + xx, yy, zz = s.get_data() + col = s.eval_color_func(xx, yy, zz) + assert np.allclose(xx * yy * zz, col) + + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda u:u) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(uu, col) + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda u, v: u * v) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(uu * vv, col) + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda x, y, z: x * y * z) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(xx * yy * zz, col) + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda x, y, z, u, v: x * y * z * u * v) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(xx * yy * zz * uu * vv, col) + + # Interactive Series + s = List2DSeries([0, 1, 2, x], [x, 2, 3, 4], + color_func=lambda x, y: 2 * x, params={x: 1}, use_cm=True) + xx, yy, col = s.get_data() + assert np.allclose(xx, [0, 1, 2, 1]) + assert np.allclose(yy, [1, 2, 3, 4]) + assert np.allclose(2 * xx, col) + assert s.is_parametric and s.use_cm + + s = List2DSeries([0, 1, 2, x], [x, 2, 3, 4], + color_func=lambda x, y: 2 * x, params={x: 1}, use_cm=False) + assert len(s.get_data()) == 2 + assert not s.is_parametric + + +def test_color_func_scalar_val(): + # verify that eval_color_func returns a numpy array even when color_func + # evaluates to a scalar value + if not np: + skip("numpy not installed.") + + x, y = symbols("x, y") + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda t: 1) + xx, yy, col = s.get_data() + assert np.allclose(col, np.ones(xx.shape)) + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 2*pi), + adaptive=False, n=10, color_func=lambda t: 1) + xx, yy, zz, col = s.get_data() + assert np.allclose(col, 
np.ones(xx.shape)) + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + adaptive=False, n1=10, n2=10, color_func=lambda x: 1) + xx, yy, zz = s.get_data() + assert np.allclose(s.eval_color_func(xx), np.ones(xx.shape)) + + s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1), adaptive=False, + n1=10, n2=10, color_func=lambda u: 1) + xx, yy, zz, uu, vv = s.get_data() + col = s.eval_color_func(xx, yy, zz, uu, vv) + assert np.allclose(col, np.ones(xx.shape)) + + +def test_color_func_expression(): + # verify that color_func is able to deal with instances of Expr: they will + # be lambdified with the same signature used for the main expression. + if not np: + skip("numpy not installed.") + + x, y = symbols("x, y") + + s1 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + color_func=sin(x), adaptive=False, n=10, use_cm=True) + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + color_func=lambda x: np.cos(x), adaptive=False, n=10, use_cm=True) + # the following statement should not raise errors + d1 = s1.get_data() + assert callable(s1.color_func) + d2 = s2.get_data() + assert not np.allclose(d1[-1], d2[-1]) + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), + color_func=sin(x**2 + y**2), adaptive=False, n1=5, n2=5) + # the following statement should not raise errors + s.get_data() + assert callable(s.color_func) + + xx = [1, 2, 3, 4, 5] + yy = [1, 2, 3, 4, 5] + raises(TypeError, + lambda : List2DSeries(xx, yy, use_cm=True, color_func=sin(x))) + + +def test_line_surface_color(): + # verify the back-compatibility with the old sympy.plotting module. + # By setting line_color or surface_color to be a callable, it will set + # the color_func attribute. + + x, y, z = symbols("x, y, z") + + s = LineOver1DRangeSeries(sin(x), (x, -5, 5), adaptive=False, n=10, + line_color=lambda x: x) + assert (s.line_color is None) and callable(s.color_func) + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 2*pi), + adaptive=False, n=10, line_color=lambda t: t) + assert (s.line_color is None) and callable(s.color_func) + + s = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -2, 2), (y, -2, 2), + n1=10, n2=10, surface_color=lambda x: x) + assert (s.surface_color is None) and callable(s.color_func) + + +def test_complex_adaptive_false(): + # verify that series with adaptive=False is evaluated with discretized + # ranges of type complex. 
+ if not np: + skip("numpy not installed.") + + x, y, u = symbols("x y u") + + def do_test(data1, data2): + assert len(data1) == len(data2) + for d1, d2 in zip(data1, data2): + assert np.allclose(d1, d2) + + expr1 = sqrt(x) * exp(-x**2) + expr2 = sqrt(u * x) * exp(-x**2) + s1 = LineOver1DRangeSeries(im(expr1), (x, -5, 5), adaptive=False, n=10) + s2 = LineOver1DRangeSeries(im(expr2), (x, -5, 5), + adaptive=False, n=10, params={u: 1}) + data1 = s1.get_data() + data2 = s2.get_data() + + do_test(data1, data2) + assert (not np.allclose(data1[1], 0)) and (not np.allclose(data2[1], 0)) + + s1 = Parametric2DLineSeries(re(expr1), im(expr1), (x, -pi, pi), + adaptive=False, n=10) + s2 = Parametric2DLineSeries(re(expr2), im(expr2), (x, -pi, pi), + adaptive=False, n=10, params={u: 1}) + data1 = s1.get_data() + data2 = s2.get_data() + do_test(data1, data2) + assert (not np.allclose(data1[1], 0)) and (not np.allclose(data2[1], 0)) + + s1 = SurfaceOver2DRangeSeries(im(expr1), (x, -5, 5), (y, -10, 10), + adaptive=False, n1=30, n2=3) + s2 = SurfaceOver2DRangeSeries(im(expr2), (x, -5, 5), (y, -10, 10), + adaptive=False, n1=30, n2=3, params={u: 1}) + data1 = s1.get_data() + data2 = s2.get_data() + do_test(data1, data2) + assert (not np.allclose(data1[1], 0)) and (not np.allclose(data2[1], 0)) + + +def test_expr_is_lambda_function(): + # verify that when a numpy function is provided, the series will be able + # to evaluate it. Also, label should be empty in order to prevent some + # backend from crashing. + if not np: + skip("numpy not installed.") + + f = lambda x: np.cos(x) + s1 = LineOver1DRangeSeries(f, ("x", -5, 5), adaptive=True, depth=3) + s1.get_data() + s2 = LineOver1DRangeSeries(f, ("x", -5, 5), adaptive=False, n=10) + s2.get_data() + assert s1.label == s2.label == "" + + fx = lambda x: np.cos(x) + fy = lambda x: np.sin(x) + s1 = Parametric2DLineSeries(fx, fy, ("x", 0, 2*pi), + adaptive=True, adaptive_goal=0.1) + s1.get_data() + s2 = Parametric2DLineSeries(fx, fy, ("x", 0, 2*pi), + adaptive=False, n=10) + s2.get_data() + assert s1.label == s2.label == "" + + fz = lambda x: x + s1 = Parametric3DLineSeries(fx, fy, fz, ("x", 0, 2*pi), + adaptive=True, adaptive_goal=0.1) + s1.get_data() + s2 = Parametric3DLineSeries(fx, fy, fz, ("x", 0, 2*pi), + adaptive=False, n=10) + s2.get_data() + assert s1.label == s2.label == "" + + f = lambda x, y: np.cos(x**2 + y**2) + s1 = SurfaceOver2DRangeSeries(f, ("a", -2, 2), ("b", -3, 3), + adaptive=False, n1=10, n2=10) + s1.get_data() + s2 = ContourSeries(f, ("a", -2, 2), ("b", -3, 3), + adaptive=False, n1=10, n2=10) + s2.get_data() + assert s1.label == s2.label == "" + + fx = lambda u, v: np.cos(u + v) + fy = lambda u, v: np.sin(u - v) + fz = lambda u, v: u * v + s1 = ParametricSurfaceSeries(fx, fy, fz, ("u", 0, pi), ("v", 0, 2*pi), + adaptive=False, n1=10, n2=10) + s1.get_data() + assert s1.label == "" + + raises(TypeError, lambda: List2DSeries(lambda t: t, lambda t: t)) + raises(TypeError, lambda : ImplicitSeries(lambda t: np.sin(t), + ("x", -5, 5), ("y", -6, 6))) + + +def test_show_in_legend_lines(): + # verify that lines series correctly set the show_in_legend attribute + x, u = symbols("x, u") + + s = LineOver1DRangeSeries(cos(x), (x, -2, 2), "test", show_in_legend=True) + assert s.show_in_legend + s = LineOver1DRangeSeries(cos(x), (x, -2, 2), "test", show_in_legend=False) + assert not s.show_in_legend + + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 1), "test", + show_in_legend=True) + assert s.show_in_legend + s = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 
1), "test", + show_in_legend=False) + assert not s.show_in_legend + + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 1), "test", + show_in_legend=True) + assert s.show_in_legend + s = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 1), "test", + show_in_legend=False) + assert not s.show_in_legend + + +@XFAIL +def test_particular_case_1_with_adaptive_true(): + # Verify that symbolic expressions and numerical lambda functions are + # evaluated with the same algorithm. + if not np: + skip("numpy not installed.") + + # NOTE: xfail because sympy's adaptive algorithm is not deterministic + + def do_test(a, b): + with warns( + RuntimeWarning, + match="invalid value encountered in scalar power", + test_stacklevel=False, + ): + d1 = a.get_data() + d2 = b.get_data() + for t, v in zip(d1, d2): + assert np.allclose(t, v) + + n = symbols("n") + a = S(2) / 3 + epsilon = 0.01 + xn = (n**3 + n**2)**(S(1)/3) - (n**3 - n**2)**(S(1)/3) + expr = Abs(xn - a) - epsilon + math_func = lambdify([n], expr) + s1 = LineOver1DRangeSeries(expr, (n, -10, 10), "", + adaptive=True, depth=3) + s2 = LineOver1DRangeSeries(math_func, ("n", -10, 10), "", + adaptive=True, depth=3) + do_test(s1, s2) + + +def test_particular_case_1_with_adaptive_false(): + # Verify that symbolic expressions and numerical lambda functions are + # evaluated with the same algorithm. In particular, uniform evaluation + # is going to use np.vectorize, which correctly evaluates the following + # mathematical function. + if not np: + skip("numpy not installed.") + + def do_test(a, b): + d1 = a.get_data() + d2 = b.get_data() + for t, v in zip(d1, d2): + assert np.allclose(t, v) + + n = symbols("n") + a = S(2) / 3 + epsilon = 0.01 + xn = (n**3 + n**2)**(S(1)/3) - (n**3 - n**2)**(S(1)/3) + expr = Abs(xn - a) - epsilon + math_func = lambdify([n], expr) + + s3 = LineOver1DRangeSeries(expr, (n, -10, 10), "", + adaptive=False, n=10) + s4 = LineOver1DRangeSeries(math_func, ("n", -10, 10), "", + adaptive=False, n=10) + do_test(s3, s4) + + +def test_complex_params_number_eval(): + # The main expression contains terms like sqrt(xi - 1), with + # parameter (0 <= xi <= 1). + # There shouldn't be any NaN values on the output. + if not np: + skip("numpy not installed.") + + xi, wn, x0, v0, t = symbols("xi, omega_n, x0, v0, t") + x = Function("x")(t) + eq = x.diff(t, 2) + 2 * xi * wn * x.diff(t) + wn**2 * x + sol = dsolve(eq, x, ics={x.subs(t, 0): x0, x.diff(t).subs(t, 0): v0}) + params = { + wn: 0.5, + xi: 0.25, + x0: 0.45, + v0: 0.0 + } + s = LineOver1DRangeSeries(sol.rhs, (t, 0, 100), adaptive=False, n=5, + params=params) + x, y = s.get_data() + assert not np.isnan(x).any() + assert not np.isnan(y).any() + + + # Fourier Series of a sawtooth wave + # The main expression contains a Sum with a symbolic upper range. + # The lambdified code looks like: + # sum(blablabla for for n in range(1, m+1)) + # But range requires integer numbers, whereas per above example, the series + # casts parameters to complex. 
Verify that the series is able to detect + # upper bounds in summations and cast it to int in order to get successfull + # evaluation + x, T, n, m = symbols("x, T, n, m") + fs = S(1) / 2 - (1 / pi) * Sum(sin(2 * n * pi * x / T) / n, (n, 1, m)) + params = { + T: 4.5, + m: 5 + } + s = LineOver1DRangeSeries(fs, (x, 0, 10), adaptive=False, n=5, + params=params) + x, y = s.get_data() + assert not np.isnan(x).any() + assert not np.isnan(y).any() + + +def test_complex_range_line_plot_1(): + # verify that univariate functions are evaluated with a complex + # data range (with zero imaginary part). There shouln't be any + # NaN value in the output. + if not np: + skip("numpy not installed.") + + x, u = symbols("x, u") + expr1 = im(sqrt(x) * exp(-x**2)) + expr2 = im(sqrt(u * x) * exp(-x**2)) + s1 = LineOver1DRangeSeries(expr1, (x, -10, 10), adaptive=True, + adaptive_goal=0.1) + s2 = LineOver1DRangeSeries(expr1, (x, -10, 10), adaptive=False, n=30) + s3 = LineOver1DRangeSeries(expr2, (x, -10, 10), adaptive=False, n=30, + params={u: 1}) + + with ignore_warnings(RuntimeWarning): + data1 = s1.get_data() + data2 = s2.get_data() + data3 = s3.get_data() + + assert not np.isnan(data1[1]).any() + assert not np.isnan(data2[1]).any() + assert not np.isnan(data3[1]).any() + assert np.allclose(data2[0], data3[0]) and np.allclose(data2[1], data3[1]) + + +@XFAIL +def test_complex_range_line_plot_2(): + # verify that univariate functions are evaluated with a complex + # data range (with non-zero imaginary part). There shouln't be any + # NaN value in the output. + if not np: + skip("numpy not installed.") + + # NOTE: xfail because sympy's adaptive algorithm is unable to deal with + # complex number. + + x, u = symbols("x, u") + + # adaptive and uniform meshing should produce the same data. + # because of the adaptive nature, just compare the first and last points + # of both series. + s1 = LineOver1DRangeSeries(abs(sqrt(x)), (x, -5-2j, 5-2j), adaptive=True) + s2 = LineOver1DRangeSeries(abs(sqrt(x)), (x, -5-2j, 5-2j), adaptive=False, + n=10) + with warns( + RuntimeWarning, + match="invalid value encountered in sqrt", + test_stacklevel=False, + ): + d1 = s1.get_data() + d2 = s2.get_data() + xx1 = [d1[0][0], d1[0][-1]] + xx2 = [d2[0][0], d2[0][-1]] + yy1 = [d1[1][0], d1[1][-1]] + yy2 = [d2[1][0], d2[1][-1]] + assert np.allclose(xx1, xx2) + assert np.allclose(yy1, yy2) + + +def test_force_real_eval(): + # verify that force_real_eval=True produces inconsistent results when + # compared with evaluation of complex domain. 
+ if not np: + skip("numpy not installed.") + + x = symbols("x") + + expr = im(sqrt(x) * exp(-x**2)) + s1 = LineOver1DRangeSeries(expr, (x, -10, 10), adaptive=False, n=10, + force_real_eval=False) + s2 = LineOver1DRangeSeries(expr, (x, -10, 10), adaptive=False, n=10, + force_real_eval=True) + d1 = s1.get_data() + with ignore_warnings(RuntimeWarning): + d2 = s2.get_data() + assert not np.allclose(d1[1], 0) + assert np.allclose(d2[1], 0) + + +def test_contour_series_show_clabels(): + # verify that a contour series has the abiliy to set the visibility of + # labels to contour lines + + x, y = symbols("x, y") + s = ContourSeries(cos(x*y), (x, -2, 2), (y, -2, 2)) + assert s.show_clabels + + s = ContourSeries(cos(x*y), (x, -2, 2), (y, -2, 2), clabels=True) + assert s.show_clabels + + s = ContourSeries(cos(x*y), (x, -2, 2), (y, -2, 2), clabels=False) + assert not s.show_clabels + + +def test_LineOver1DRangeSeries_complex_range(): + # verify that LineOver1DRangeSeries can accept a complex range + # if the imaginary part of the start and end values are the same + + x = symbols("x") + + LineOver1DRangeSeries(sqrt(x), (x, -10, 10)) + LineOver1DRangeSeries(sqrt(x), (x, -10-2j, 10-2j)) + raises(ValueError, + lambda : LineOver1DRangeSeries(sqrt(x), (x, -10-2j, 10+2j))) + + +def test_symbolic_plotting_ranges(): + # verify that data series can use symbolic plotting ranges + if not np: + skip("numpy not installed.") + + x, y, z, a, b = symbols("x, y, z, a, b") + + def do_test(s1, s2, new_params): + d1 = s1.get_data() + d2 = s2.get_data() + for u, v in zip(d1, d2): + assert np.allclose(u, v) + s2.params = new_params + d2 = s2.get_data() + for u, v in zip(d1, d2): + assert not np.allclose(u, v) + + s1 = LineOver1DRangeSeries(sin(x), (x, 0, 1), adaptive=False, n=10) + s2 = LineOver1DRangeSeries(sin(x), (x, a, b), params={a: 0, b: 1}, + adaptive=False, n=10) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : LineOver1DRangeSeries(sin(x), (x, a, b), params={a: 1}, n=10)) + + s1 = Parametric2DLineSeries(cos(x), sin(x), (x, 0, 1), adaptive=False, n=10) + s2 = Parametric2DLineSeries(cos(x), sin(x), (x, a, b), params={a: 0, b: 1}, + adaptive=False, n=10) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : Parametric2DLineSeries(cos(x), sin(x), (x, a, b), + params={a: 0}, adaptive=False, n=10)) + + s1 = Parametric3DLineSeries(cos(x), sin(x), x, (x, 0, 1), + adaptive=False, n=10) + s2 = Parametric3DLineSeries(cos(x), sin(x), x, (x, a, b), + params={a: 0, b: 1}, adaptive=False, n=10) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : Parametric3DLineSeries(cos(x), sin(x), x, (x, a, b), + params={a: 0}, adaptive=False, n=10)) + + s1 = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -pi, pi), (y, -pi, pi), + adaptive=False, n1=5, n2=5) + s2 = SurfaceOver2DRangeSeries(cos(x**2 + y**2), (x, -pi * a, pi * a), + (y, -pi * b, pi * b), params={a: 1, b: 1}, + adaptive=False, n1=5, n2=5) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : SurfaceOver2DRangeSeries(cos(x**2 + y**2), + (x, -pi * a, pi * a), (y, -pi * b, pi * b), params={a: 1}, + adaptive=False, n1=5, n2=5)) + # one range symbol is included into another range's minimum or maximum val + raises(ValueError, + lambda : SurfaceOver2DRangeSeries(cos(x**2 + y**2), + (x, -pi * a + y, pi * a), (y, -pi * b, pi * b), params={a: 1}, + adaptive=False, n1=5, n2=5)) + + s1 = ParametricSurfaceSeries( + cos(x - y), 
sin(x + y), x - y, (x, -2, 2), (y, -2, 2), n1=5, n2=5) + s2 = ParametricSurfaceSeries( + cos(x - y), sin(x + y), x - y, (x, -2 * a, 2), (y, -2, 2 * b), + params={a: 1, b: 1}, n1=5, n2=5) + do_test(s1, s2, {a: 0.5, b: 1.5}) + + # missing a parameter + raises(ValueError, + lambda : ParametricSurfaceSeries( + cos(x - y), sin(x + y), x - y, (x, -2 * a, 2), (y, -2, 2 * b), + params={a: 1}, n1=5, n2=5)) + + +def test_exclude_points(): + # verify that exclude works as expected + if not np: + skip("numpy not installed.") + + x = symbols("x") + + expr = (floor(x) + S.Half) / (1 - (x - S.Half)**2) + + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some", + test_stacklevel=False, + ): + s = LineOver1DRangeSeries(expr, (x, -3.5, 3.5), adaptive=False, n=100, + exclude=list(range(-3, 4))) + xx, yy = s.get_data() + assert not np.isnan(xx).any() + assert np.count_nonzero(np.isnan(yy)) == 7 + assert len(xx) > 100 + + e1 = log(floor(x)) * cos(x) + e2 = log(floor(x)) * sin(x) + with warns( + UserWarning, + match="NumPy is unable to evaluate with complex numbers some", + test_stacklevel=False, + ): + s = Parametric2DLineSeries(e1, e2, (x, 1, 12), adaptive=False, n=100, + exclude=list(range(1, 13))) + xx, yy, pp = s.get_data() + assert not np.isnan(pp).any() + assert np.count_nonzero(np.isnan(xx)) == 11 + assert np.count_nonzero(np.isnan(yy)) == 11 + assert len(xx) > 100 + + +def test_unwrap(): + # verify that unwrap works as expected + if not np: + skip("numpy not installed.") + + x, y = symbols("x, y") + expr = 1 / (x**3 + 2*x**2 + x) + expr = arg(expr.subs(x, I*y*2*pi)) + s1 = LineOver1DRangeSeries(expr, (y, 1e-05, 1e05), xscale="log", + adaptive=False, n=10, unwrap=False) + s2 = LineOver1DRangeSeries(expr, (y, 1e-05, 1e05), xscale="log", + adaptive=False, n=10, unwrap=True) + s3 = LineOver1DRangeSeries(expr, (y, 1e-05, 1e05), xscale="log", + adaptive=False, n=10, unwrap={"period": 4}) + x1, y1 = s1.get_data() + x2, y2 = s2.get_data() + x3, y3 = s3.get_data() + assert np.allclose(x1, x2) + # there must not be nan values in the results of these evaluations + assert all(not np.isnan(t).any() for t in [y1, y2, y3]) + assert not np.allclose(y1, y2) + assert not np.allclose(y1, y3) + assert not np.allclose(y2, y3) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_utils.py b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4206a8b001319552c2e2be1aeb46057e6f708912 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/plotting/tests/test_utils.py @@ -0,0 +1,110 @@ +from pytest import raises +from sympy import ( + symbols, Expr, Tuple, Integer, cos, solveset, FiniteSet, ImageSet) +from sympy.plotting.utils import ( + _create_ranges, _plot_sympify, extract_solution) +from sympy.physics.mechanics import ReferenceFrame, Vector as MechVector +from sympy.vector import CoordSys3D, Vector + + +def test_plot_sympify(): + x, y = symbols("x, y") + + # argument is already sympified + args = x + y + r = _plot_sympify(args) + assert r == args + + # one argument needs to be sympified + args = (x + y, 1) + r = _plot_sympify(args) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(r[0], Expr) + assert isinstance(r[1], Integer) + + # string and dict should not be sympified + args = (x + y, (x, 0, 1), "str", 1, {1: 1, 2: 2.0}) + r = _plot_sympify(args) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 5 + 
assert isinstance(r[0], Expr) + assert isinstance(r[1], Tuple) + assert isinstance(r[2], str) + assert isinstance(r[3], Integer) + assert isinstance(r[4], dict) and isinstance(r[4][1], int) and isinstance(r[4][2], float) + + # nested arguments containing strings + args = ((x + y, (y, 0, 1), "a"), (x + 1, (x, 0, 1), "$f_{1}$")) + r = _plot_sympify(args) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(r[0], Tuple) + assert isinstance(r[0][1], Tuple) + assert isinstance(r[0][1][1], Integer) + assert isinstance(r[0][2], str) + assert isinstance(r[1], Tuple) + assert isinstance(r[1][1], Tuple) + assert isinstance(r[1][1][1], Integer) + assert isinstance(r[1][2], str) + + # vectors from sympy.physics.vectors module are not sympified + # vectors from sympy.vectors are sympified + # in both cases, no error should be raised + R = ReferenceFrame("R") + v1 = 2 * R.x + R.y + C = CoordSys3D("C") + v2 = 2 * C.i + C.j + args = (v1, v2) + r = _plot_sympify(args) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(v1, MechVector) + assert isinstance(v2, Vector) + + +def test_create_ranges(): + x, y = symbols("x, y") + + # user don't provide any range -> return a default range + r = _create_ranges({x}, [], 1) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 1 + assert isinstance(r[0], (Tuple, tuple)) + assert r[0] == (x, -10, 10) + + r = _create_ranges({x, y}, [], 2) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(r[0], (Tuple, tuple)) + assert isinstance(r[1], (Tuple, tuple)) + assert r[0] == (x, -10, 10) or (y, -10, 10) + assert r[1] == (y, -10, 10) or (x, -10, 10) + assert r[0] != r[1] + + # not enough ranges provided by the user -> create default ranges + r = _create_ranges( + {x, y}, + [ + (x, 0, 1), + ], + 2, + ) + assert isinstance(r, (list, tuple, Tuple)) and len(r) == 2 + assert isinstance(r[0], (Tuple, tuple)) + assert isinstance(r[1], (Tuple, tuple)) + assert r[0] == (x, 0, 1) or (y, -10, 10) + assert r[1] == (y, -10, 10) or (x, 0, 1) + assert r[0] != r[1] + + # too many free symbols + raises(ValueError, lambda: _create_ranges({x, y}, [], 1)) + raises(ValueError, lambda: _create_ranges({x, y}, [(x, 0, 5), (y, 0, 1)], 1)) + + +def test_extract_solution(): + x = symbols("x") + + sol = solveset(cos(10 * x)) + assert sol.has(ImageSet) + res = extract_solution(sol) + assert len(res) == 20 + assert isinstance(res, FiniteSet) + + res = extract_solution(sol, 20) + assert len(res) == 40 + assert isinstance(res, FiniteSet) diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a51f0d38a41760e578336d36e2b560db7e310edf --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:310b467920b7c47537a5b4cc7be2d95d9bc3e1debd0eb0eec15d1a086c54bff2 +size 106624 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee77f92832222b044bf3663a2d6d33468edc62e6 --- /dev/null +++ 
b/evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/tests/__pycache__/test_systems.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:012aa5a4a636162aecfdaf1098e2074ea9e7136f1d75471f8ef7a0ca7df6e9e8 +size 112044 diff --git a/evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c19e254f936093d9e74d9d7d740196508c77bb3 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solvers.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbf9853e6395e88fe228f678aef9abe73d4c381a412697e313bdfce599db29b7 +size 108394 diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so b/evalkit_tf437/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..593f6c158d2967d4448c0975d4ac30442e7be923 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f812dd0f40ab6a517a9a5a66ad96c6c0e56f1c9ad7568ad5ba4c9d29a129e42a +size 4985256 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c434c5c18117a39ace3865b89ff83a1a85666e1c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75534834cdeaacfedcd048f72c7f3161be5e997b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/tempita.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf5a3212ce26610358cfcbfff28d9718b667d994 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/__pycache__/version.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py new file mode 100644 index 0000000000000000000000000000000000000000..c92ea17d2a9b99d944324eafde86bbee2fa485bc --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/tempita.py @@ -0,0 +1,60 @@ +# Authors: The 
scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import argparse +import os + +from Cython import Tempita as tempita + +# XXX: If this import ever fails (does it really?), vendor either +# cython.tempita or numpy/npy_tempita. + + +def process_tempita(fromfile, outfile=None): + """Process tempita templated file and write out the result. + + The template file is expected to end in `.c.tp` or `.pyx.tp`: + E.g. processing `template.c.in` generates `template.c`. + + """ + with open(fromfile, "r", encoding="utf-8") as f: + template_content = f.read() + + template = tempita.Template(template_content) + content = template.substitute() + + with open(outfile, "w", encoding="utf-8") as f: + f.write(content) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("infile", type=str, help="Path to the input file") + parser.add_argument("-o", "--outdir", type=str, help="Path to the output directory") + parser.add_argument( + "-i", + "--ignore", + type=str, + help=( + "An ignored input - may be useful to add a " + "dependency between custom targets" + ), + ) + args = parser.parse_args() + + if not args.infile.endswith(".tp"): + raise ValueError(f"Unexpected extension: {args.infile}") + + if not args.outdir: + raise ValueError("Missing `--outdir` argument to tempita.py") + + outdir_abs = os.path.join(os.getcwd(), args.outdir) + outfile = os.path.join( + outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0] + ) + + process_tempita(args.infile, outfile) + + +if __name__ == "__main__": + main() diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/version.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/version.py new file mode 100644 index 0000000000000000000000000000000000000000..922a14917bf3fd2d395a4f5002a39c4d9d9c7ee2 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/_build_utils/version.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +"""Extract version number from __init__.py""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import os + +sklearn_init = os.path.join(os.path.dirname(__file__), "../__init__.py") + +data = open(sklearn_init).readlines() +version_line = next(line for line in data if line.startswith("__version__")) + +version = version_line.strip().split(" = ")[1].replace('"', "").replace("'", "") + +print(version) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..989f3372b42e0161c45a239ffd27387233448488 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__init__.py @@ -0,0 +1,46 @@ +"""Methods and algorithms to robustly estimate covariance. + +They estimate the covariance of features at given sets of points, as well as the +precision matrix defined as the inverse of the covariance. Covariance estimation is +closely related to the theory of Gaussian graphical models. 
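The covariance module docstring above notes that the precision matrix is simply the inverse of the covariance matrix. A minimal NumPy-only sketch of that relationship, on toy data of my own (the pseudo-inverse is used purely for numerical safety):

    import numpy as np

    # The precision matrix referred to above is the (pseudo-)inverse of the
    # estimated covariance matrix.
    rng = np.random.RandomState(0)
    X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)
    cov = np.cov(X, rowvar=False)
    precision = np.linalg.pinv(cov)
    print(cov @ precision)  # approximately the 2x2 identity matrix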
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from ._elliptic_envelope import EllipticEnvelope +from ._empirical_covariance import ( + EmpiricalCovariance, + empirical_covariance, + log_likelihood, +) +from ._graph_lasso import GraphicalLasso, GraphicalLassoCV, graphical_lasso +from ._robust_covariance import MinCovDet, fast_mcd +from ._shrunk_covariance import ( + OAS, + LedoitWolf, + ShrunkCovariance, + ledoit_wolf, + ledoit_wolf_shrinkage, + oas, + shrunk_covariance, +) + +__all__ = [ + "EllipticEnvelope", + "EmpiricalCovariance", + "GraphicalLasso", + "GraphicalLassoCV", + "LedoitWolf", + "MinCovDet", + "OAS", + "ShrunkCovariance", + "empirical_covariance", + "fast_mcd", + "graphical_lasso", + "ledoit_wolf", + "ledoit_wolf_shrinkage", + "log_likelihood", + "oas", + "shrunk_covariance", +] diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..206e04720a866bb07b14740ced243467dd22d16e Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b61fffd0dab32d47d632251e046662c52977a56 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_elliptic_envelope.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e57803ffdfc97805ddcd20e4ffa16ea4000a96f3 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_empirical_covariance.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7036184b03ec95d755091e9994323bddd58ecc6d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_graph_lasso.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8decae9f573aff1455203c713a723525690f003f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/__pycache__/_robust_covariance.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py new file mode 100644 index 0000000000000000000000000000000000000000..81ae86b4ad76ecab67a09c16e295edb0034343ff --- 
/dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_elliptic_envelope.py @@ -0,0 +1,266 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from numbers import Real + +import numpy as np + +from ..base import OutlierMixin, _fit_context +from ..metrics import accuracy_score +from ..utils._param_validation import Interval +from ..utils.validation import check_is_fitted +from ._robust_covariance import MinCovDet + + +class EllipticEnvelope(OutlierMixin, MinCovDet): + """An object for detecting outliers in a Gaussian distributed dataset. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, the support of robust location and covariance estimates + is computed, and a covariance estimate is recomputed from it, + without centering the data. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, the robust location and covariance are directly computed + with the FastMCD algorithm without additional treatment. + + support_fraction : float, default=None + The proportion of points to be included in the support of the raw + MCD estimate. If None, the minimum value of support_fraction will + be used within the algorithm: `(n_samples + n_features + 1) / 2 * n_samples`. + Range is (0, 1). + + contamination : float, default=0.1 + The amount of contamination of the data set, i.e. the proportion + of outliers in the data set. Range is (0, 0.5]. + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling + the data. Pass an int for reproducible results across multiple function + calls. See :term:`Glossary `. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated robust location. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated robust covariance matrix. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute the + robust estimates of location and shape. + + offset_ : float + Offset used to define the decision function from the raw scores. + We have the relation: ``decision_function = score_samples - offset_``. + The offset depends on the contamination parameter and is defined in + such a way we obtain the expected number of outliers (samples with + decision function < 0) in training. + + .. versionadded:: 0.20 + + raw_location_ : ndarray of shape (n_features,) + The raw robust estimated location before correction and re-weighting. + + raw_covariance_ : ndarray of shape (n_features, n_features) + The raw robust estimated covariance before correction and re-weighting. + + raw_support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute + the raw robust estimates of location and shape, before correction + and re-weighting. + + dist_ : ndarray of shape (n_samples,) + Mahalanobis distances of the training set (on which :meth:`fit` is + called) observations. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. 
Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + Notes + ----- + Outlier detection from covariance estimation may break or not + perform well in high-dimensional settings. In particular, one will + always take care to work with ``n_samples > n_features ** 2``. + + References + ---------- + .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the + minimum covariance determinant estimator" Technometrics 41(3), 212 + (1999) + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import EllipticEnvelope + >>> true_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0], + ... cov=true_cov, + ... size=500) + >>> cov = EllipticEnvelope(random_state=0).fit(X) + >>> # predict returns 1 for an inlier and -1 for an outlier + >>> cov.predict([[0, 0], + ... [3, 3]]) + array([ 1, -1]) + >>> cov.covariance_ + array([[0.7411..., 0.2535...], + [0.2535..., 0.3053...]]) + >>> cov.location_ + array([0.0813... , 0.0427...]) + """ + + _parameter_constraints: dict = { + **MinCovDet._parameter_constraints, + "contamination": [Interval(Real, 0, 0.5, closed="right")], + } + + def __init__( + self, + *, + store_precision=True, + assume_centered=False, + support_fraction=None, + contamination=0.1, + random_state=None, + ): + super().__init__( + store_precision=store_precision, + assume_centered=assume_centered, + support_fraction=support_fraction, + random_state=random_state, + ) + self.contamination = contamination + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the EllipticEnvelope model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + super().fit(X) + self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination) + return self + + def decision_function(self, X): + """Compute the decision function of the given observations. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + decision : ndarray of shape (n_samples,) + Decision function of the samples. + It is equal to the shifted Mahalanobis distances. + The threshold for being an outlier is 0, which ensures a + compatibility with other outlier detection algorithms. + """ + check_is_fitted(self) + negative_mahal_dist = self.score_samples(X) + return negative_mahal_dist - self.offset_ + + def score_samples(self, X): + """Compute the negative Mahalanobis distances. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + negative_mahal_distances : array-like of shape (n_samples,) + Opposite of the Mahalanobis distances. + """ + check_is_fitted(self) + return -self.mahalanobis(X) + + def predict(self, X): + """ + Predict labels (1 inlier, -1 outlier) of X according to fitted model. 
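As a complement to the `fit` and `decision_function` methods shown above, a small sketch (my own toy data) illustrating that, on the training set, the fraction of points labelled as outliers tracks the `contamination` parameter, because `offset_` is the corresponding percentile of the raw training scores:

    import numpy as np
    from sklearn.covariance import EllipticEnvelope

    rng = np.random.RandomState(0)
    X = rng.multivariate_normal(mean=[0, 0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)
    est = EllipticEnvelope(contamination=0.1, random_state=0).fit(X)
    # offset_ is the contamination-quantile of the training scores, so roughly
    # 10% of the training points get a negative decision function (label -1).
    print(np.mean(est.predict(X) == -1))  # close to 0.1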
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix. + + Returns + ------- + is_inlier : ndarray of shape (n_samples,) + Returns -1 for anomalies/outliers and +1 for inliers. + """ + values = self.decision_function(X) + is_inlier = np.full(values.shape[0], -1, dtype=int) + is_inlier[values >= 0] = 1 + + return is_inlier + + def score(self, X, y, sample_weight=None): + """Return the mean accuracy on the given test data and labels. + + In multi-label classification, this is the subset accuracy + which is a harsh metric since you require for each sample that + each label set be correctly predicted. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) or (n_samples, n_outputs) + True labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + Returns + ------- + score : float + Mean accuracy of self.predict(X) w.r.t. y. + """ + return accuracy_score(y, self.predict(X), sample_weight=sample_weight) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..955046fa37d4b9bb0813394b97c7a1879fac62dd --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_empirical_covariance.py @@ -0,0 +1,367 @@ +""" +Maximum likelihood covariance estimator. + +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +# avoid division truncation +import warnings + +import numpy as np +from scipy import linalg + +from sklearn.utils import metadata_routing + +from .. import config_context +from ..base import BaseEstimator, _fit_context +from ..metrics.pairwise import pairwise_distances +from ..utils import check_array +from ..utils._param_validation import validate_params +from ..utils.extmath import fast_logdet +from ..utils.validation import validate_data + + +@validate_params( + { + "emp_cov": [np.ndarray], + "precision": [np.ndarray], + }, + prefer_skip_nested_validation=True, +) +def log_likelihood(emp_cov, precision): + """Compute the sample mean of the log_likelihood under a covariance model. + + Computes the empirical expected log-likelihood, allowing for universal + comparison (beyond this software package), and accounts for normalization + terms and scaling. + + Parameters + ---------- + emp_cov : ndarray of shape (n_features, n_features) + Maximum Likelihood Estimator of covariance. + + precision : ndarray of shape (n_features, n_features) + The precision matrix of the covariance model to be tested. + + Returns + ------- + log_likelihood_ : float + Sample mean of the log-likelihood. + """ + p = precision.shape[0] + log_likelihood_ = -np.sum(emp_cov * precision) + fast_logdet(precision) + log_likelihood_ -= p * np.log(2 * np.pi) + log_likelihood_ /= 2.0 + return log_likelihood_ + + +@validate_params( + { + "X": ["array-like"], + "assume_centered": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def empirical_covariance(X, *, assume_centered=False): + """Compute the Maximum likelihood covariance estimator. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + assume_centered : bool, default=False + If `True`, data will not be centered before computation. 
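The `log_likelihood` helper above returns the per-sample Gaussian log-likelihood of an empirical covariance under a candidate precision matrix. A short sketch on toy data (both functions are exported from `sklearn.covariance`, as listed in the package `__init__` earlier in this diff):

    import numpy as np
    from scipy import linalg
    from sklearn.covariance import empirical_covariance, log_likelihood

    rng = np.random.RandomState(0)
    X = rng.multivariate_normal(mean=[0, 0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)
    emp_cov = empirical_covariance(X, assume_centered=True)
    # The inverse of the empirical covariance is the maximum-likelihood
    # precision, so it attains the highest score for this emp_cov.
    print(log_likelihood(emp_cov, linalg.pinvh(emp_cov)))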
+ Useful when working with data whose mean is almost, but not exactly + zero. + If `False`, data will be centered before computation. + + Returns + ------- + covariance : ndarray of shape (n_features, n_features) + Empirical covariance (Maximum Likelihood Estimator). + + Examples + -------- + >>> from sklearn.covariance import empirical_covariance + >>> X = [[1,1,1],[1,1,1],[1,1,1], + ... [0,0,0],[0,0,0],[0,0,0]] + >>> empirical_covariance(X) + array([[0.25, 0.25, 0.25], + [0.25, 0.25, 0.25], + [0.25, 0.25, 0.25]]) + """ + X = check_array(X, ensure_2d=False, ensure_all_finite=False) + + if X.ndim == 1: + X = np.reshape(X, (1, -1)) + + if X.shape[0] == 1: + warnings.warn( + "Only one sample available. You may want to reshape your data array" + ) + + if assume_centered: + covariance = np.dot(X.T, X) / X.shape[0] + else: + covariance = np.cov(X.T, bias=1) + + if covariance.ndim == 0: + covariance = np.array([[covariance]]) + return covariance + + +class EmpiricalCovariance(BaseEstimator): + """Maximum likelihood covariance estimator. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specifies if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data are centered before computation. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo-inverse matrix. + (stored only if store_precision is True) + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import EmpiricalCovariance + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=500) + >>> cov = EmpiricalCovariance().fit(X) + >>> cov.covariance_ + array([[0.7569..., 0.2818...], + [0.2818..., 0.3928...]]) + >>> cov.location_ + array([0.0622..., 0.0193...]) + """ + + # X_test should have been called X + __metadata_request__score = {"X_test": metadata_routing.UNUSED} + + _parameter_constraints: dict = { + "store_precision": ["boolean"], + "assume_centered": ["boolean"], + } + + def __init__(self, *, store_precision=True, assume_centered=False): + self.store_precision = store_precision + self.assume_centered = assume_centered + + def _set_covariance(self, covariance): + """Saves the covariance and precision estimates + + Storage is done accordingly to `self.store_precision`. 
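A small sketch contrasting the two code paths of `empirical_covariance` shown above: with `assume_centered=True` the raw second moment `X.T @ X / n` is returned, otherwise the data are centered first, which is the same as subtracting the outer product of the column means (illustrative 3x2 matrix of my own):

    import numpy as np

    X = np.array([[1.0, 2.0], [3.0, 0.0], [2.0, 1.0]])
    centered = np.cov(X.T, bias=1)        # assume_centered=False branch
    uncentered = X.T @ X / X.shape[0]     # assume_centered=True branch
    # The two estimates differ exactly by the outer product of the column means.
    print(np.allclose(centered, uncentered - np.outer(X.mean(0), X.mean(0))))  # True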
+ Precision stored only if invertible. + + Parameters + ---------- + covariance : array-like of shape (n_features, n_features) + Estimated covariance matrix to be stored, and from which precision + is computed. + """ + covariance = check_array(covariance) + # set covariance + self.covariance_ = covariance + # set precision + if self.store_precision: + self.precision_ = linalg.pinvh(covariance, check_finite=False) + else: + self.precision_ = None + + def get_precision(self): + """Getter for the precision matrix. + + Returns + ------- + precision_ : array-like of shape (n_features, n_features) + The precision matrix associated to the current covariance object. + """ + if self.store_precision: + precision = self.precision_ + else: + precision = linalg.pinvh(self.covariance_, check_finite=False) + return precision + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the maximum likelihood covariance estimator to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = validate_data(self, X) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance = empirical_covariance(X, assume_centered=self.assume_centered) + self._set_covariance(covariance) + + return self + + def score(self, X_test, y=None): + """Compute the log-likelihood of `X_test` under the estimated Gaussian model. + + The Gaussian model is defined by its mean and covariance matrix which are + represented respectively by `self.location_` and `self.covariance_`. + + Parameters + ---------- + X_test : array-like of shape (n_samples, n_features) + Test data of which we compute the likelihood, where `n_samples` is + the number of samples and `n_features` is the number of features. + `X_test` is assumed to be drawn from the same distribution than + the data used in fit (including centering). + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + res : float + The log-likelihood of `X_test` with `self.location_` and `self.covariance_` + as estimators of the Gaussian model mean and covariance matrix respectively. + """ + X_test = validate_data(self, X_test, reset=False) + # compute empirical covariance of the test set + test_cov = empirical_covariance(X_test - self.location_, assume_centered=True) + # compute log likelihood + res = log_likelihood(test_cov, self.get_precision()) + + return res + + def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True): + """Compute the Mean Squared Error between two covariance estimators. + + Parameters + ---------- + comp_cov : array-like of shape (n_features, n_features) + The covariance to compare with. + + norm : {"frobenius", "spectral"}, default="frobenius" + The type of norm used to compute the error. Available error types: + - 'frobenius' (default): sqrt(tr(A^t.A)) + - 'spectral': sqrt(max(eigenvalues(A^t.A)) + where A is the error ``(comp_cov - self.covariance_)``. + + scaling : bool, default=True + If True (default), the squared error norm is divided by n_features. + If False, the squared error norm is not rescaled. + + squared : bool, default=True + Whether to compute the squared error norm or the error norm. + If True (default), the squared error norm is returned. 
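The `error_norm` docstring above defines the 'frobenius' option as sqrt(tr(A^T A)) of the difference A between the two covariance estimates; a one-line check (with an illustrative matrix of my own) that this is the ordinary Frobenius norm:

    import numpy as np

    A = np.array([[0.1, -0.2], [0.05, 0.3]])  # stand-in for comp_cov - covariance_
    print(np.sqrt(np.trace(A.T @ A)), np.linalg.norm(A, "fro"))  # equal up to rounding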
+ If False, the error norm is returned. + + Returns + ------- + result : float + The Mean Squared Error (in the sense of the Frobenius norm) between + `self` and `comp_cov` covariance estimators. + """ + # compute the error + error = comp_cov - self.covariance_ + # compute the error norm + if norm == "frobenius": + squared_norm = np.sum(error**2) + elif norm == "spectral": + squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error))) + else: + raise NotImplementedError( + "Only spectral and frobenius norms are implemented" + ) + # optionally scale the error norm + if scaling: + squared_norm = squared_norm / error.shape[0] + # finally get either the squared norm or the norm + if squared: + result = squared_norm + else: + result = np.sqrt(squared_norm) + + return result + + def mahalanobis(self, X): + """Compute the squared Mahalanobis distances of given observations. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The observations, the Mahalanobis distances of the which we + compute. Observations are assumed to be drawn from the same + distribution than the data used in fit. + + Returns + ------- + dist : ndarray of shape (n_samples,) + Squared Mahalanobis distances of the observations. + """ + X = validate_data(self, X, reset=False) + + precision = self.get_precision() + with config_context(assume_finite=True): + # compute mahalanobis distances + dist = pairwise_distances( + X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision + ) + + return np.reshape(dist, (len(X),)) ** 2 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py new file mode 100644 index 0000000000000000000000000000000000000000..73fa4f1fd6e66069a84acdd21b8d5d9508008bb0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_graph_lasso.py @@ -0,0 +1,1140 @@ +"""GraphicalLasso: sparse inverse covariance estimation with an l1-penalized +estimator. +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import operator +import sys +import time +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import linalg + +from ..base import _fit_context +from ..exceptions import ConvergenceWarning + +# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast' +from ..linear_model import _cd_fast as cd_fast # type: ignore +from ..linear_model import lars_path_gram +from ..model_selection import check_cv, cross_val_score +from ..utils import Bunch +from ..utils._param_validation import Interval, StrOptions, validate_params +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _is_arraylike_not_scalar, + check_random_state, + check_scalar, + validate_data, +) +from . import EmpiricalCovariance, empirical_covariance, log_likelihood + + +# Helper functions to compute the objective and dual objective functions +# of the l1-penalized estimator +def _objective(mle, precision_, alpha): + """Evaluation of the graphical-lasso objective function + + the objective function is made of a shifted scaled version of the + normalized log-likelihood (i.e. 
its empirical mean over the samples) and a + penalisation term to promote sparsity + """ + p = precision_.shape[0] + cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi) + cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) + return cost + + +def _dual_gap(emp_cov, precision_, alpha): + """Expression of the dual gap convergence criterion + + The specific definition is given in Duchi "Projected Subgradient Methods + for Learning Sparse Gaussians". + """ + gap = np.sum(emp_cov * precision_) + gap -= precision_.shape[0] + gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()) + return gap + + +# The g-lasso algorithm +def _graphical_lasso( + emp_cov, + alpha, + *, + cov_init=None, + mode="cd", + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + eps=np.finfo(np.float64).eps, +): + _, n_features = emp_cov.shape + if alpha == 0: + # Early return without regularization + precision_ = linalg.inv(emp_cov) + cost = -2.0 * log_likelihood(emp_cov, precision_) + cost += n_features * np.log(2 * np.pi) + d_gap = np.sum(emp_cov * precision_) - n_features + return emp_cov, precision_, (cost, d_gap), 0 + + if cov_init is None: + covariance_ = emp_cov.copy() + else: + covariance_ = cov_init.copy() + # As a trivial regularization (Tikhonov like), we scale down the + # off-diagonal coefficients of our starting point: This is needed, as + # in the cross-validation the cov_init can easily be + # ill-conditioned, and the CV loop blows. Beside, this takes + # conservative stand-point on the initial conditions, and it tends to + # make the convergence go faster. + covariance_ *= 0.95 + diagonal = emp_cov.flat[:: n_features + 1] + covariance_.flat[:: n_features + 1] = diagonal + precision_ = linalg.pinvh(covariance_) + + indices = np.arange(n_features) + i = 0 # initialize the counter to be robust to `max_iter=0` + costs = list() + # The different l1 regression solver have different numerical errors + if mode == "cd": + errors = dict(over="raise", invalid="ignore") + else: + errors = dict(invalid="raise") + try: + # be robust to the max_iter=0 edge case, see: + # https://github.com/scikit-learn/scikit-learn/issues/4134 + d_gap = np.inf + # set a sub_covariance buffer + sub_covariance = np.copy(covariance_[1:, 1:], order="C") + for i in range(max_iter): + for idx in range(n_features): + # To keep the contiguous matrix `sub_covariance` equal to + # covariance_[indices != idx].T[indices != idx] + # we only need to update 1 column and 1 line when idx changes + if idx > 0: + di = idx - 1 + sub_covariance[di] = covariance_[di][indices != idx] + sub_covariance[:, di] = covariance_[:, di][indices != idx] + else: + sub_covariance[:] = covariance_[1:, 1:] + row = emp_cov[idx, indices != idx] + with np.errstate(**errors): + if mode == "cd": + # Use coordinate descent + coefs = -( + precision_[indices != idx, idx] + / (precision_[idx, idx] + 1000 * eps) + ) + coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram( + coefs, + alpha, + 0, + sub_covariance, + row, + row, + max_iter, + enet_tol, + check_random_state(None), + False, + ) + else: # mode == "lars" + _, _, coefs = lars_path_gram( + Xy=row, + Gram=sub_covariance, + n_samples=row.size, + alpha_min=alpha / (n_features - 1), + copy_Gram=True, + eps=eps, + method="lars", + return_path=False, + ) + # Update the precision matrix + precision_[idx, idx] = 1.0 / ( + covariance_[idx, idx] + - np.dot(covariance_[indices != idx, idx], coefs) + ) + precision_[indices != idx, idx] = -precision_[idx, idx] * 
coefs + precision_[idx, indices != idx] = -precision_[idx, idx] * coefs + coefs = np.dot(sub_covariance, coefs) + covariance_[idx, indices != idx] = coefs + covariance_[indices != idx, idx] = coefs + if not np.isfinite(precision_.sum()): + raise FloatingPointError( + "The system is too ill-conditioned for this solver" + ) + d_gap = _dual_gap(emp_cov, precision_, alpha) + cost = _objective(emp_cov, precision_, alpha) + if verbose: + print( + "[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e" + % (i, cost, d_gap) + ) + costs.append((cost, d_gap)) + if np.abs(d_gap) < tol: + break + if not np.isfinite(cost) and i > 0: + raise FloatingPointError( + "Non SPD result: the system is too ill-conditioned for this solver" + ) + else: + warnings.warn( + "graphical_lasso: did not converge after %i iteration: dual gap: %.3e" + % (max_iter, d_gap), + ConvergenceWarning, + ) + except FloatingPointError as e: + e.args = (e.args[0] + ". The system is too ill-conditioned for this solver",) + raise e + + return covariance_, precision_, costs, i + 1 + + +def alpha_max(emp_cov): + """Find the maximum alpha for which there are some non-zeros off-diagonal. + + Parameters + ---------- + emp_cov : ndarray of shape (n_features, n_features) + The sample covariance matrix. + + Notes + ----- + This results from the bound for the all the Lasso that are solved + in GraphicalLasso: each time, the row of cov corresponds to Xy. As the + bound for alpha is given by `max(abs(Xy))`, the result follows. + """ + A = np.copy(emp_cov) + A.flat[:: A.shape[0] + 1] = 0 + return np.max(np.abs(A)) + + +@validate_params( + { + "emp_cov": ["array-like"], + "return_costs": ["boolean"], + "return_n_iter": ["boolean"], + }, + prefer_skip_nested_validation=False, +) +def graphical_lasso( + emp_cov, + alpha, + *, + mode="cd", + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + return_costs=False, + eps=np.finfo(np.float64).eps, + return_n_iter=False, +): + """L1-penalized covariance estimator. + + Read more in the :ref:`User Guide `. + + .. versionchanged:: v0.20 + graph_lasso has been renamed to graphical_lasso + + Parameters + ---------- + emp_cov : array-like of shape (n_features, n_features) + Empirical covariance from which to compute the covariance estimate. + + alpha : float + The regularization parameter: the higher alpha, the more + regularization, the sparser the inverse covariance. + Range is (0, inf]. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where p > n. Elsewhere prefer cd + which is more numerically stable. + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. Range is (0, inf]. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. Range is (0, inf]. + + max_iter : int, default=100 + The maximum number of iterations. + + verbose : bool, default=False + If verbose is True, the objective function and dual gap are + printed at each iteration. + + return_costs : bool, default=False + If return_costs is True, the objective function and dual gap + at each iteration are returned. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. 
Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + return_n_iter : bool, default=False + Whether or not to return the number of iterations. + + Returns + ------- + covariance : ndarray of shape (n_features, n_features) + The estimated covariance matrix. + + precision : ndarray of shape (n_features, n_features) + The estimated (sparse) precision matrix. + + costs : list of (objective, dual_gap) pairs + The list of values of the objective function and the dual gap at + each iteration. Returned only if return_costs is True. + + n_iter : int + Number of iterations. Returned only if `return_n_iter` is set to True. + + See Also + -------- + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with + cross-validated choice of the l1 penalty. + + Notes + ----- + The algorithm employed to solve this problem is the GLasso algorithm, + from the Friedman 2008 Biostatistics paper. It is the same algorithm + as in the R `glasso` package. + + One possible difference with the `glasso` R package is that the + diagonal coefficients are not penalized. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_sparse_spd_matrix + >>> from sklearn.covariance import empirical_covariance, graphical_lasso + >>> true_cov = make_sparse_spd_matrix(n_dim=3,random_state=42) + >>> rng = np.random.RandomState(42) + >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3) + >>> emp_cov = empirical_covariance(X, assume_centered=True) + >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05) + >>> emp_cov + array([[ 1.68..., 0.21..., -0.20...], + [ 0.21..., 0.22..., -0.08...], + [-0.20..., -0.08..., 0.23...]]) + """ + model = GraphicalLasso( + alpha=alpha, + mode=mode, + covariance="precomputed", + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + verbose=verbose, + eps=eps, + assume_centered=True, + ).fit(emp_cov) + + output = [model.covariance_, model.precision_] + if return_costs: + output.append(model.costs_) + if return_n_iter: + output.append(model.n_iter_) + return tuple(output) + + +class BaseGraphicalLasso(EmpiricalCovariance): + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "tol": [Interval(Real, 0, None, closed="right")], + "enet_tol": [Interval(Real, 0, None, closed="right")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "mode": [StrOptions({"cd", "lars"})], + "verbose": ["verbose"], + "eps": [Interval(Real, 0, None, closed="both")], + } + _parameter_constraints.pop("store_precision") + + def __init__( + self, + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + mode="cd", + verbose=False, + eps=np.finfo(np.float64).eps, + assume_centered=False, + ): + super().__init__(assume_centered=assume_centered) + self.tol = tol + self.enet_tol = enet_tol + self.max_iter = max_iter + self.mode = mode + self.verbose = verbose + self.eps = eps + + +class GraphicalLasso(BaseGraphicalLasso): + """Sparse inverse covariance estimation with an l1-penalized estimator. + + For a usage example see + :ref:`sphx_glr_auto_examples_applications_plot_stock_market.py`. + + Read more in the :ref:`User Guide `. + + .. versionchanged:: v0.20 + GraphLasso has been renamed to GraphicalLasso + + Parameters + ---------- + alpha : float, default=0.01 + The regularization parameter: the higher alpha, the more + regularization, the sparser the inverse covariance. + Range is (0, inf]. 
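The `alpha` parameter described above weights an l1 penalty on the off-diagonal entries of the precision matrix. For reference, a sketch of the penalised objective the solver minimises, written out with NumPy; `glasso_objective` is my own illustrative name, and the expression mirrors `_objective` earlier in this file up to an additive constant that does not affect the minimiser:

    import numpy as np

    def glasso_objective(emp_cov, precision, alpha):
        # tr(S @ Theta) - log det Theta + alpha * ||off-diagonal of Theta||_1
        cost = np.sum(emp_cov * precision) - np.linalg.slogdet(precision)[1]
        cost += alpha * (np.abs(precision).sum() - np.abs(np.diag(precision)).sum())
        return cost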
+ + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where p > n. Elsewhere prefer cd + which is more numerically stable. + + covariance : "precomputed", default=None + If covariance is "precomputed", the input data in `fit` is assumed + to be the covariance matrix. If `None`, the empirical covariance + is estimated from the data `X`. + + .. versionadded:: 1.3 + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. Range is (0, inf]. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. Range is (0, inf]. + + max_iter : int, default=100 + The maximum number of iterations. + + verbose : bool, default=False + If verbose is True, the objective function and dual gap are + plotted at each iteration. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + .. versionadded:: 1.3 + + assume_centered : bool, default=False + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data are centered before computation. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + + n_iter_ : int + Number of iterations run. + + costs_ : list of (objective, dual_gap) pairs + The list of values of the objective function and the dual gap at + each iteration. Returned only if return_costs is True. + + .. versionadded:: 1.3 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + graphical_lasso : L1-penalized covariance estimator. + GraphicalLassoCV : Sparse inverse covariance with + cross-validated choice of the l1 penalty. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import GraphicalLasso + >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.2, 0.0, 0.3, 0.1], + ... [0.0, 0.0, 0.1, 0.7]]) + >>> np.random.seed(0) + >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0], + ... cov=true_cov, + ... 
size=200) + >>> cov = GraphicalLasso().fit(X) + >>> np.around(cov.covariance_, decimals=3) + array([[0.816, 0.049, 0.218, 0.019], + [0.049, 0.364, 0.017, 0.034], + [0.218, 0.017, 0.322, 0.093], + [0.019, 0.034, 0.093, 0.69 ]]) + >>> np.around(cov.location_, decimals=3) + array([0.073, 0.04 , 0.038, 0.143]) + """ + + _parameter_constraints: dict = { + **BaseGraphicalLasso._parameter_constraints, + "alpha": [Interval(Real, 0, None, closed="both")], + "covariance": [StrOptions({"precomputed"}), None], + } + + def __init__( + self, + alpha=0.01, + *, + mode="cd", + covariance=None, + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + eps=np.finfo(np.float64).eps, + assume_centered=False, + ): + super().__init__( + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + mode=mode, + verbose=verbose, + eps=eps, + assume_centered=assume_centered, + ) + self.alpha = alpha + self.covariance = covariance + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the GraphicalLasso model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + # Covariance does not make sense for a single feature + X = validate_data(self, X, ensure_min_features=2, ensure_min_samples=2) + + if self.covariance == "precomputed": + emp_cov = X.copy() + self.location_ = np.zeros(X.shape[1]) + else: + emp_cov = empirical_covariance(X, assume_centered=self.assume_centered) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + + self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso( + emp_cov, + alpha=self.alpha, + cov_init=None, + mode=self.mode, + tol=self.tol, + enet_tol=self.enet_tol, + max_iter=self.max_iter, + verbose=self.verbose, + eps=self.eps, + ) + return self + + +# Cross-validation with GraphicalLasso +def graphical_lasso_path( + X, + alphas, + cov_init=None, + X_test=None, + mode="cd", + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + verbose=False, + eps=np.finfo(np.float64).eps, +): + """l1-penalized covariance estimator along a path of decreasing alphas + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + alphas : array-like of shape (n_alphas,) + The list of regularization parameters, decreasing order. + + cov_init : array of shape (n_features, n_features), default=None + The initial guess for the covariance. + + X_test : array of shape (n_test_samples, n_features), default=None + Optional test matrix to measure generalisation error. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where p > n. Elsewhere prefer cd + which is more numerically stable. + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. The tolerance must be a positive + number. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. The tolerance must be a positive number. 
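The `tol` parameter documented above thresholds the dual gap, the convergence measure computed by `_dual_gap` earlier in this file. A compact sketch of that stopping rule; `dual_gap_converged` is a standalone reimplementation for illustration only, not part of the library:

    import numpy as np

    def dual_gap_converged(emp_cov, precision, alpha, tol=1e-4):
        # Duality gap of the graphical lasso problem; iterations stop once its
        # absolute value falls below `tol`.
        gap = np.sum(emp_cov * precision) - precision.shape[0]
        gap += alpha * (np.abs(precision).sum() - np.abs(np.diag(precision)).sum())
        return np.abs(gap) < tol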
+ + max_iter : int, default=100 + The maximum number of iterations. This parameter should be a strictly + positive integer. + + verbose : int or bool, default=False + The higher the verbosity flag, the more information is printed + during the fitting. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + .. versionadded:: 1.3 + + Returns + ------- + covariances_ : list of shape (n_alphas,) of ndarray of shape \ + (n_features, n_features) + The estimated covariance matrices. + + precisions_ : list of shape (n_alphas,) of ndarray of shape \ + (n_features, n_features) + The estimated (sparse) precision matrices. + + scores_ : list of shape (n_alphas,), dtype=float + The generalisation error (log-likelihood) on the test data. + Returned only if test data is passed. + """ + inner_verbose = max(0, verbose - 1) + emp_cov = empirical_covariance(X) + if cov_init is None: + covariance_ = emp_cov.copy() + else: + covariance_ = cov_init + covariances_ = list() + precisions_ = list() + scores_ = list() + if X_test is not None: + test_emp_cov = empirical_covariance(X_test) + + for alpha in alphas: + try: + # Capture the errors, and move on + covariance_, precision_, _, _ = _graphical_lasso( + emp_cov, + alpha=alpha, + cov_init=covariance_, + mode=mode, + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + verbose=inner_verbose, + eps=eps, + ) + covariances_.append(covariance_) + precisions_.append(precision_) + if X_test is not None: + this_score = log_likelihood(test_emp_cov, precision_) + except FloatingPointError: + this_score = -np.inf + covariances_.append(np.nan) + precisions_.append(np.nan) + if X_test is not None: + if not np.isfinite(this_score): + this_score = -np.inf + scores_.append(this_score) + if verbose == 1: + sys.stderr.write(".") + elif verbose > 1: + if X_test is not None: + print( + "[graphical_lasso_path] alpha: %.2e, score: %.2e" + % (alpha, this_score) + ) + else: + print("[graphical_lasso_path] alpha: %.2e" % alpha) + if X_test is not None: + return covariances_, precisions_, scores_ + return covariances_, precisions_ + + +class GraphicalLassoCV(BaseGraphicalLasso): + """Sparse inverse covariance w/ cross-validated choice of the l1 penalty. + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + .. versionchanged:: v0.20 + GraphLassoCV has been renamed to GraphicalLassoCV + + Parameters + ---------- + alphas : int or array-like of shape (n_alphas,), dtype=float, default=4 + If an integer is given, it fixes the number of points on the + grids of alpha to be used. If a list is given, it gives the + grid to be used. See the notes in the class docstring for + more details. Range is [1, inf) for an integer. + Range is (0, inf] for an array-like of floats. + + n_refinements : int, default=4 + The number of times the grid is refined. Not used if explicit + values of alphas are passed. Range is [1, inf). + + cv : int, cross-validation generator or iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs :class:`~sklearn.model_selection.KFold` is used. 
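As noted above, `cv` accepts either an integer (handled with KFold) or an explicit splitter object; a minimal construction sketch, with no data involved:

    from sklearn.covariance import GraphicalLassoCV
    from sklearn.model_selection import KFold

    # Two ways of requesting 5-fold cross-validation; the second form
    # additionally allows shuffling with a fixed seed.
    cv_int = GraphicalLassoCV(cv=5)
    cv_splitter = GraphicalLassoCV(cv=KFold(n_splits=5, shuffle=True, random_state=0))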
+ + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.20 + ``cv`` default value if None changed from 3-fold to 5-fold. + + tol : float, default=1e-4 + The tolerance to declare convergence: if the dual gap goes below + this value, iterations are stopped. Range is (0, inf]. + + enet_tol : float, default=1e-4 + The tolerance for the elastic net solver used to calculate the descent + direction. This parameter controls the accuracy of the search direction + for a given column update, not of the overall parameter estimate. Only + used for mode='cd'. Range is (0, inf]. + + max_iter : int, default=100 + Maximum number of iterations. + + mode : {'cd', 'lars'}, default='cd' + The Lasso solver to use: coordinate descent or LARS. Use LARS for + very sparse underlying graphs, where number of features is greater + than number of samples. Elsewhere prefer cd which is more numerically + stable. + + n_jobs : int, default=None + Number of jobs to run in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + .. versionchanged:: v0.20 + `n_jobs` default changed from 1 to None + + verbose : bool, default=False + If verbose is True, the objective function and duality gap are + printed at each iteration. + + eps : float, default=eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Default is `np.finfo(np.float64).eps`. + + .. versionadded:: 1.3 + + assume_centered : bool, default=False + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data are centered before computation. + + Attributes + ---------- + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + + precision_ : ndarray of shape (n_features, n_features) + Estimated precision matrix (inverse covariance). + + costs_ : list of (objective, dual_gap) pairs + The list of values of the objective function and the dual gap at + each iteration. Returned only if return_costs is True. + + .. versionadded:: 1.3 + + alpha_ : float + Penalization parameter selected. + + cv_results_ : dict of ndarrays + A dict with keys: + + alphas : ndarray of shape (n_alphas,) + All penalization parameters explored. + + split(k)_test_score : ndarray of shape (n_alphas,) + Log-likelihood score on left-out data across (k)th fold. + + .. versionadded:: 1.0 + + mean_test_score : ndarray of shape (n_alphas,) + Mean of scores over the folds. + + .. versionadded:: 1.0 + + std_test_score : ndarray of shape (n_alphas,) + Standard deviation of scores over the folds. + + .. versionadded:: 1.0 + + n_iter_ : int + Number of iterations run for the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + graphical_lasso : L1-penalized covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. 
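The attributes documented above (`alpha_`, `cv_results_`, ...) are what one typically inspects after fitting. A short usage sketch on synthetic data of my own; `make_sparse_spd_matrix` is called with the same `n_dim`/`random_state` signature as in the `graphical_lasso` example earlier in this file:

    import numpy as np
    from sklearn.covariance import GraphicalLassoCV
    from sklearn.datasets import make_sparse_spd_matrix

    prec = make_sparse_spd_matrix(n_dim=4, random_state=0)
    X = np.random.RandomState(0).multivariate_normal(
        mean=np.zeros(4), cov=np.linalg.inv(prec), size=200)
    model = GraphicalLassoCV(alphas=4, n_refinements=2).fit(X)
    print(model.alpha_)                          # selected penalty
    print(model.cv_results_["alphas"])           # all penalties explored
    print(model.cv_results_["mean_test_score"])  # mean CV log-likelihood per alpha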
+ + Notes + ----- + The search for the optimal penalization parameter (`alpha`) is done on an + iteratively refined grid: first the cross-validated scores on a grid are + computed, then a new refined grid is centered around the maximum, and so + on. + + One of the challenges which is faced here is that the solvers can + fail to converge to a well-conditioned estimate. The corresponding + values of `alpha` then come out as missing values, but the optimum may + be close to these missing values. + + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import GraphicalLassoCV + >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.2, 0.0, 0.3, 0.1], + ... [0.0, 0.0, 0.1, 0.7]]) + >>> np.random.seed(0) + >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0], + ... cov=true_cov, + ... size=200) + >>> cov = GraphicalLassoCV().fit(X) + >>> np.around(cov.covariance_, decimals=3) + array([[0.816, 0.051, 0.22 , 0.017], + [0.051, 0.364, 0.018, 0.036], + [0.22 , 0.018, 0.322, 0.094], + [0.017, 0.036, 0.094, 0.69 ]]) + >>> np.around(cov.location_, decimals=3) + array([0.073, 0.04 , 0.038, 0.143]) + """ + + _parameter_constraints: dict = { + **BaseGraphicalLasso._parameter_constraints, + "alphas": [Interval(Integral, 0, None, closed="left"), "array-like"], + "n_refinements": [Interval(Integral, 1, None, closed="left")], + "cv": ["cv_object"], + "n_jobs": [Integral, None], + } + + def __init__( + self, + *, + alphas=4, + n_refinements=4, + cv=None, + tol=1e-4, + enet_tol=1e-4, + max_iter=100, + mode="cd", + n_jobs=None, + verbose=False, + eps=np.finfo(np.float64).eps, + assume_centered=False, + ): + super().__init__( + tol=tol, + enet_tol=enet_tol, + max_iter=max_iter, + mode=mode, + verbose=verbose, + eps=eps, + assume_centered=assume_centered, + ) + self.alphas = alphas + self.n_refinements = n_refinements + self.cv = cv + self.n_jobs = n_jobs + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, **params): + """Fit the GraphicalLasso covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + y : Ignored + Not used, present for API consistency by convention. + + **params : dict, default=None + Parameters to be passed to the CV splitter and the + cross_val_score function. + + .. versionadded:: 1.5 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns the instance itself. 
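# A condensed sketch of the iterative grid refinement described in the Notes
# above: lay a logarithmic grid between alpha_0 and alpha_1, score it, and
# centre the next, tighter grid on the best point. `score` is a stand-in for
# the cross-validated log-likelihood; all names are illustrative.
import numpy as np

def refine_grid(score, alpha_0, alpha_1, n_alphas=4, n_refinements=4):
    for _ in range(n_refinements):
        alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1]
        best = max(range(n_alphas), key=lambda i: score(alphas[i]))
        # shrink the bracket around the current best alpha
        alpha_1 = alphas[max(best - 1, 0)]
        alpha_0 = alphas[min(best + 1, n_alphas - 1)]
    return alphas[best]

# e.g. refine_grid(lambda a: -(np.log10(a) + 1.5) ** 2, 1e-3, 1.0)
# homes in on the peak near alpha = 10**-1.5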
+ """ + # Covariance does not make sense for a single feature + _raise_for_params(params, self, "fit") + + X = validate_data(self, X, ensure_min_features=2) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + emp_cov = empirical_covariance(X, assume_centered=self.assume_centered) + + cv = check_cv(self.cv, y, classifier=False) + + # List of (alpha, scores, covs) + path = list() + n_alphas = self.alphas + inner_verbose = max(0, self.verbose - 1) + + if _is_arraylike_not_scalar(n_alphas): + for alpha in self.alphas: + check_scalar( + alpha, + "alpha", + Real, + min_val=0, + max_val=np.inf, + include_boundaries="right", + ) + alphas = self.alphas + n_refinements = 1 + else: + n_refinements = self.n_refinements + alpha_1 = alpha_max(emp_cov) + alpha_0 = 1e-2 * alpha_1 + alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1] + + if _routing_enabled(): + routed_params = process_routing(self, "fit", **params) + else: + routed_params = Bunch(splitter=Bunch(split={})) + + t0 = time.time() + for i in range(n_refinements): + with warnings.catch_warnings(): + # No need to see the convergence warnings on this grid: + # they will always be points that will not converge + # during the cross-validation + warnings.simplefilter("ignore", ConvergenceWarning) + # Compute the cross-validated loss on the current grid + + # NOTE: Warm-restarting graphical_lasso_path has been tried, + # and this did not allow to gain anything + # (same execution time with or without). + this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( + delayed(graphical_lasso_path)( + X[train], + alphas=alphas, + X_test=X[test], + mode=self.mode, + tol=self.tol, + enet_tol=self.enet_tol, + max_iter=int(0.1 * self.max_iter), + verbose=inner_verbose, + eps=self.eps, + ) + for train, test in cv.split(X, y, **routed_params.splitter.split) + ) + + # Little danse to transform the list in what we need + covs, _, scores = zip(*this_path) + covs = zip(*covs) + scores = zip(*scores) + path.extend(zip(alphas, scores, covs)) + path = sorted(path, key=operator.itemgetter(0), reverse=True) + + # Find the maximum (avoid using built in 'max' function to + # have a fully-reproducible selection of the smallest alpha + # in case of equality) + best_score = -np.inf + last_finite_idx = 0 + for index, (alpha, scores, _) in enumerate(path): + this_score = np.mean(scores) + if this_score >= 0.1 / np.finfo(np.float64).eps: + this_score = np.nan + if np.isfinite(this_score): + last_finite_idx = index + if this_score >= best_score: + best_score = this_score + best_index = index + + # Refine the grid + if best_index == 0: + # We do not need to go back: we have chosen + # the highest value of alpha for which there are + # non-zero coefficients + alpha_1 = path[0][0] + alpha_0 = path[1][0] + elif best_index == last_finite_idx and not best_index == len(path) - 1: + # We have non-converged models on the upper bound of the + # grid, we need to refine the grid there + alpha_1 = path[best_index][0] + alpha_0 = path[best_index + 1][0] + elif best_index == len(path) - 1: + alpha_1 = path[best_index][0] + alpha_0 = 0.01 * path[best_index][0] + else: + alpha_1 = path[best_index - 1][0] + alpha_0 = path[best_index + 1][0] + + if not _is_arraylike_not_scalar(n_alphas): + alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2) + alphas = alphas[1:-1] + + if self.verbose and n_refinements > 1: + print( + "[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is" + % (i + 1, 
n_refinements, time.time() - t0) + ) + + path = list(zip(*path)) + grid_scores = list(path[1]) + alphas = list(path[0]) + # Finally, compute the score with alpha = 0 + alphas.append(0) + grid_scores.append( + cross_val_score( + EmpiricalCovariance(), + X, + cv=cv, + n_jobs=self.n_jobs, + verbose=inner_verbose, + params=params, + ) + ) + grid_scores = np.array(grid_scores) + + self.cv_results_ = {"alphas": np.array(alphas)} + + for i in range(grid_scores.shape[1]): + self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i] + + self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1) + self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1) + + best_alpha = alphas[best_index] + self.alpha_ = best_alpha + + # Finally fit the model with the selected alpha + self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso( + emp_cov, + alpha=best_alpha, + mode=self.mode, + tol=self.tol, + enet_tol=self.enet_tol, + max_iter=self.max_iter, + verbose=inner_verbose, + eps=self.eps, + ) + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.5 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + splitter=check_cv(self.cv), + method_mapping=MethodMapping().add(callee="split", caller="fit"), + ) + return router diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..786c4e17b552b430d5271fc1dcebf24e823781c9 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_robust_covariance.py @@ -0,0 +1,870 @@ +""" +Robust location and covariance estimators. + +Here are implemented estimators that are resistant to outliers. + +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.stats import chi2 + +from ..base import _fit_context +from ..utils import check_array, check_random_state +from ..utils._param_validation import Interval +from ..utils.extmath import fast_logdet +from ..utils.validation import validate_data +from ._empirical_covariance import EmpiricalCovariance, empirical_covariance + + +# Minimum Covariance Determinant +# Implementing of an algorithm by Rousseeuw & Van Driessen described in +# (A Fast Algorithm for the Minimum Covariance Determinant Estimator, +# 1999, American Statistical Association and the American Society +# for Quality, TECHNOMETRICS) +# XXX Is this really a public function? It's not listed in the docs or +# exported by sklearn.covariance. Deprecate? +def c_step( + X, + n_support, + remaining_iterations=30, + initial_estimates=None, + verbose=False, + cov_computation_method=empirical_covariance, + random_state=None, +): + """C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data set in which we look for the n_support observations whose + scatter matrix has minimum determinant. + + n_support : int + Number of observations to compute the robust estimates of location + and covariance from. 
This parameter must be greater than + `n_samples / 2`. + + remaining_iterations : int, default=30 + Number of iterations to perform. + According to [Rouseeuw1999]_, two iterations are sufficient to get + close to the minimum, and we never need more than 30 to reach + convergence. + + initial_estimates : tuple of shape (2,), default=None + Initial estimates of location and shape from which to run the c_step + procedure: + - initial_estimates[0]: an initial location estimate + - initial_estimates[1]: an initial covariance estimate + + verbose : bool, default=False + Verbose mode. + + cov_computation_method : callable, \ + default=:func:`sklearn.covariance.empirical_covariance` + The function which will be used to compute the covariance. + Must return array of shape (n_features, n_features). + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + location : ndarray of shape (n_features,) + Robust location estimates. + + covariance : ndarray of shape (n_features, n_features) + Robust covariance estimates. + + support : ndarray of shape (n_samples,) + A mask for the `n_support` observations whose scatter matrix has + minimum determinant. + + References + ---------- + .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant + Estimator, 1999, American Statistical Association and the American + Society for Quality, TECHNOMETRICS + """ + X = np.asarray(X) + random_state = check_random_state(random_state) + return _c_step( + X, + n_support, + remaining_iterations=remaining_iterations, + initial_estimates=initial_estimates, + verbose=verbose, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + + +def _c_step( + X, + n_support, + random_state, + remaining_iterations=30, + initial_estimates=None, + verbose=False, + cov_computation_method=empirical_covariance, +): + n_samples, n_features = X.shape + dist = np.inf + + # Initialisation + if initial_estimates is None: + # compute initial robust estimates from a random subset + support_indices = random_state.permutation(n_samples)[:n_support] + else: + # get initial robust estimates from the function parameters + location = initial_estimates[0] + covariance = initial_estimates[1] + # run a special iteration for that case (to get an initial support_indices) + precision = linalg.pinvh(covariance) + X_centered = X - location + dist = (np.dot(X_centered, precision) * X_centered).sum(1) + # compute new estimates + support_indices = np.argpartition(dist, n_support - 1)[:n_support] + + X_support = X[support_indices] + location = X_support.mean(0) + covariance = cov_computation_method(X_support) + + # Iterative procedure for Minimum Covariance Determinant computation + det = fast_logdet(covariance) + # If the data already has singular covariance, calculate the precision, + # as the loop below will not be entered. 
+ if np.isinf(det): + precision = linalg.pinvh(covariance) + + previous_det = np.inf + while det < previous_det and remaining_iterations > 0 and not np.isinf(det): + # save old estimates values + previous_location = location + previous_covariance = covariance + previous_det = det + previous_support_indices = support_indices + # compute a new support_indices from the full data set mahalanobis distances + precision = linalg.pinvh(covariance) + X_centered = X - location + dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1) + # compute new estimates + support_indices = np.argpartition(dist, n_support - 1)[:n_support] + X_support = X[support_indices] + location = X_support.mean(axis=0) + covariance = cov_computation_method(X_support) + det = fast_logdet(covariance) + # update remaining iterations for early stopping + remaining_iterations -= 1 + + previous_dist = dist + dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1) + # Check if best fit already found (det => 0, logdet => -inf) + if np.isinf(det): + results = location, covariance, det, support_indices, dist + # Check convergence + if np.allclose(det, previous_det): + # c_step procedure converged + if verbose: + print( + "Optimal couple (location, covariance) found before" + " ending iterations (%d left)" % (remaining_iterations) + ) + results = location, covariance, det, support_indices, dist + elif det > previous_det: + # determinant has increased (should not happen) + warnings.warn( + "Determinant has increased; this should not happen: " + "log(det) > log(previous_det) (%.15f > %.15f). " + "You may want to try with a higher value of " + "support_fraction (current value: %.3f)." + % (det, previous_det, n_support / n_samples), + RuntimeWarning, + ) + results = ( + previous_location, + previous_covariance, + previous_det, + previous_support_indices, + previous_dist, + ) + + # Check early stopping + if remaining_iterations == 0: + if verbose: + print("Maximum number of iterations reached") + results = location, covariance, det, support_indices, dist + + location, covariance, det, support_indices, dist = results + # Convert from list of indices to boolean mask. + support = np.bincount(support_indices, minlength=n_samples).astype(bool) + return location, covariance, det, support, dist + + +def select_candidates( + X, + n_support, + n_trials, + select=1, + n_iter=30, + verbose=False, + cov_computation_method=empirical_covariance, + random_state=None, +): + """Finds the best pure subset of observations to compute MCD from it. + + The purpose of this function is to find the best sets of n_support + observations with respect to a minimization of their covariance + matrix determinant. Equivalently, it removes n_samples-n_support + observations to construct what we call a pure data set (i.e. not + containing outliers). The list of the observations of the pure + data set is referred to as the `support`. + + Starting from a random support, the pure data set is found by the + c_step procedure introduced by Rousseeuw and Van Driessen in + [RV]_. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data (sub)set in which we look for the n_support purest observations. + + n_support : int + The number of samples the pure data set must contain. + This parameter must be in the range `[(n + p + 1)/2] < n_support < n`. + + n_trials : int or tuple of shape (2,) + Number of different initial sets of observations from which to + run the algorithm. This parameter should be a strictly positive + integer. 
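# A condensed sketch of a single C-step as implemented in the loop above:
# keep the n_support points with the smallest Mahalanobis distance under the
# current (location, covariance) and re-estimate from them. Names are
# illustrative; the real loop also tracks the determinant for stopping.
import numpy as np
from scipy import linalg
from sklearn.covariance import empirical_covariance

def one_c_step(X, n_support, location, covariance):
    precision = linalg.pinvh(covariance)
    X_centered = X - location
    dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
    support = np.argpartition(dist, n_support - 1)[:n_support]
    X_support = X[support]
    return X_support.mean(axis=0), empirical_covariance(X_support)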
+ Instead of giving a number of trials to perform, one can provide a + list of initial estimates that will be used to iteratively run + c_step procedures. In this case: + - n_trials[0]: array-like, shape (n_trials, n_features) + is the list of `n_trials` initial location estimates + - n_trials[1]: array-like, shape (n_trials, n_features, n_features) + is the list of `n_trials` initial covariances estimates + + select : int, default=1 + Number of best candidates results to return. This parameter must be + a strictly positive integer. + + n_iter : int, default=30 + Maximum number of iterations for the c_step procedure. + (2 is enough to be close to the final solution. "Never" exceeds 20). + This parameter must be a strictly positive integer. + + verbose : bool, default=False + Control the output verbosity. + + cov_computation_method : callable, \ + default=:func:`sklearn.covariance.empirical_covariance` + The function which will be used to compute the covariance. + Must return an array of shape (n_features, n_features). + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + See Also + --------- + c_step + + Returns + ------- + best_locations : ndarray of shape (select, n_features) + The `select` location estimates computed from the `select` best + supports found in the data set (`X`). + + best_covariances : ndarray of shape (select, n_features, n_features) + The `select` covariance estimates computed from the `select` + best supports found in the data set (`X`). + + best_supports : ndarray of shape (select, n_samples) + The `select` best supports found in the data set (`X`). + + References + ---------- + .. 
[RV] A Fast Algorithm for the Minimum Covariance Determinant + Estimator, 1999, American Statistical Association and the American + Society for Quality, TECHNOMETRICS + """ + random_state = check_random_state(random_state) + + if isinstance(n_trials, Integral): + run_from_estimates = False + elif isinstance(n_trials, tuple): + run_from_estimates = True + estimates_list = n_trials + n_trials = estimates_list[0].shape[0] + else: + raise TypeError( + "Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)" + % (n_trials, type(n_trials)) + ) + + # compute `n_trials` location and shape estimates candidates in the subset + all_estimates = [] + if not run_from_estimates: + # perform `n_trials` computations from random initial supports + for j in range(n_trials): + all_estimates.append( + _c_step( + X, + n_support, + remaining_iterations=n_iter, + verbose=verbose, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + ) + else: + # perform computations from every given initial estimates + for j in range(n_trials): + initial_estimates = (estimates_list[0][j], estimates_list[1][j]) + all_estimates.append( + _c_step( + X, + n_support, + remaining_iterations=n_iter, + initial_estimates=initial_estimates, + verbose=verbose, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + ) + all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = zip( + *all_estimates + ) + # find the `n_best` best results among the `n_trials` ones + index_best = np.argsort(all_dets_sub)[:select] + best_locations = np.asarray(all_locs_sub)[index_best] + best_covariances = np.asarray(all_covs_sub)[index_best] + best_supports = np.asarray(all_supports_sub)[index_best] + best_ds = np.asarray(all_ds_sub)[index_best] + + return best_locations, best_covariances, best_supports, best_ds + + +def fast_mcd( + X, + support_fraction=None, + cov_computation_method=empirical_covariance, + random_state=None, +): + """Estimate the Minimum Covariance Determinant matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The data matrix, with p features and n samples. + + support_fraction : float, default=None + The proportion of points to be included in the support of the raw + MCD estimate. Default is `None`, which implies that the minimum + value of `support_fraction` will be used within the algorithm: + `(n_samples + n_features + 1) / 2 * n_samples`. This parameter must be + in the range (0, 1). + + cov_computation_method : callable, \ + default=:func:`sklearn.covariance.empirical_covariance` + The function which will be used to compute the covariance. + Must return an array of shape (n_features, n_features). + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Returns + ------- + location : ndarray of shape (n_features,) + Robust location of the data. + + covariance : ndarray of shape (n_features, n_features) + Robust covariance of the features. + + support : ndarray of shape (n_samples,), dtype=bool + A mask of the observations that have been used to compute + the robust location and covariance estimates of the data set. 
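# A hedged usage sketch for the two forms of `n_trials` documented above,
# assuming the private module path introduced in this patch
# (sklearn.covariance._robust_covariance); toy data only.
import numpy as np
from sklearn.covariance._robust_covariance import select_candidates

rng = np.random.RandomState(0)
X = rng.randn(100, 3)
n_support = 60  # must exceed n_samples / 2

# integer form: 10 random initial supports, keep the 2 best candidates
locs, covs, supports, dists = select_candidates(
    X, n_support, n_trials=10, select=2, random_state=0
)
# tuple form: refine the previously found candidates down to a single winner
best_loc, best_cov, best_support, best_dist = select_candidates(
    X, n_support, n_trials=(locs, covs), select=1, random_state=0
)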
+ + Notes + ----- + The FastMCD algorithm has been introduced by Rousseuw and Van Driessen + in "A Fast Algorithm for the Minimum Covariance Determinant Estimator, + 1999, American Statistical Association and the American Society + for Quality, TECHNOMETRICS". + The principle is to compute robust estimates and random subsets before + pooling them into a larger subsets, and finally into the full data set. + Depending on the size of the initial sample, we have one, two or three + such computation levels. + + Note that only raw estimates are returned. If one is interested in + the correction and reweighting steps described in [RouseeuwVan]_, + see the MinCovDet object. + + References + ---------- + + .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance + Determinant Estimator, 1999, American Statistical Association + and the American Society for Quality, TECHNOMETRICS + + .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun, + Asymptotics For The Minimum Covariance Determinant Estimator, + The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400 + """ + random_state = check_random_state(random_state) + + X = check_array(X, ensure_min_samples=2, estimator="fast_mcd") + n_samples, n_features = X.shape + + # minimum breakdown value + if support_fraction is None: + n_support = int(np.ceil(0.5 * (n_samples + n_features + 1))) + else: + n_support = int(support_fraction * n_samples) + + # 1-dimensional case quick computation + # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust + # Regression and Outlier Detection, John Wiley & Sons, chapter 4) + if n_features == 1: + if n_support < n_samples: + # find the sample shortest halves + X_sorted = np.sort(np.ravel(X)) + diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)] + halves_start = np.where(diff == np.min(diff))[0] + # take the middle points' mean to get the robust location estimate + location = ( + 0.5 + * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean() + ) + support = np.zeros(n_samples, dtype=bool) + X_centered = X - location + support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True + covariance = np.asarray([[np.var(X[support])]]) + location = np.array([location]) + # get precision matrix in an optimized way + precision = linalg.pinvh(covariance) + dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) + else: + support = np.ones(n_samples, dtype=bool) + covariance = np.asarray([[np.var(X)]]) + location = np.asarray([np.mean(X)]) + X_centered = X - location + # get precision matrix in an optimized way + precision = linalg.pinvh(covariance) + dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1) + # Starting FastMCD algorithm for p-dimensional case + if (n_samples > 500) and (n_features > 1): + # 1. Find candidate supports on subsets + # a. split the set in subsets of size ~ 300 + n_subsets = n_samples // 300 + n_samples_subsets = n_samples // n_subsets + samples_shuffle = random_state.permutation(n_samples) + h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples)))) + # b. perform a total of 500 trials + n_trials_tot = 500 + # c. select 10 best (location, covariance) for each subset + n_best_sub = 10 + n_trials = max(10, n_trials_tot // n_subsets) + n_best_tot = n_subsets * n_best_sub + all_best_locations = np.zeros((n_best_tot, n_features)) + try: + all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) + except MemoryError: + # The above is too big. 
Let's try with something much small + # (and less optimal) + n_best_tot = 10 + all_best_covariances = np.zeros((n_best_tot, n_features, n_features)) + n_best_sub = 2 + for i in range(n_subsets): + low_bound = i * n_samples_subsets + high_bound = low_bound + n_samples_subsets + current_subset = X[samples_shuffle[low_bound:high_bound]] + best_locations_sub, best_covariances_sub, _, _ = select_candidates( + current_subset, + h_subset, + n_trials, + select=n_best_sub, + n_iter=2, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub) + all_best_locations[subset_slice] = best_locations_sub + all_best_covariances[subset_slice] = best_covariances_sub + # 2. Pool the candidate supports into a merged set + # (possibly the full dataset) + n_samples_merged = min(1500, n_samples) + h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples)))) + if n_samples > 1500: + n_best_merged = 10 + else: + n_best_merged = 1 + # find the best couples (location, covariance) on the merged set + selection = random_state.permutation(n_samples)[:n_samples_merged] + locations_merged, covariances_merged, supports_merged, d = select_candidates( + X[selection], + h_merged, + n_trials=(all_best_locations, all_best_covariances), + select=n_best_merged, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + # 3. Finally get the overall best (locations, covariance) couple + if n_samples < 1500: + # directly get the best couple (location, covariance) + location = locations_merged[0] + covariance = covariances_merged[0] + support = np.zeros(n_samples, dtype=bool) + dist = np.zeros(n_samples) + support[selection] = supports_merged[0] + dist[selection] = d[0] + else: + # select the best couple on the full dataset + locations_full, covariances_full, supports_full, d = select_candidates( + X, + n_support, + n_trials=(locations_merged, covariances_merged), + select=1, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + location = locations_full[0] + covariance = covariances_full[0] + support = supports_full[0] + dist = d[0] + elif n_features > 1: + # 1. Find the 10 best couples (location, covariance) + # considering two iterations + n_trials = 30 + n_best = 10 + locations_best, covariances_best, _, _ = select_candidates( + X, + n_support, + n_trials=n_trials, + select=n_best, + n_iter=2, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + # 2. Select the best couple on the full dataset amongst the 10 + locations_full, covariances_full, supports_full, d = select_candidates( + X, + n_support, + n_trials=(locations_best, covariances_best), + select=1, + cov_computation_method=cov_computation_method, + random_state=random_state, + ) + location = locations_full[0] + covariance = covariances_full[0] + support = supports_full[0] + dist = d[0] + + return location, covariance, support, dist + + +class MinCovDet(EmpiricalCovariance): + """Minimum Covariance Determinant (MCD): robust estimator of covariance. + + The Minimum Covariance Determinant covariance estimator is to be applied + on Gaussian-distributed data, but could still be relevant on data + drawn from a unimodal, symmetric distribution. It is not meant to be used + with multi-modal data (the algorithm used to fit a MinCovDet object is + likely to fail in such a case). + One should consider projection pursuit methods to deal with multi-modal + datasets. 
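# A minimal usage sketch of fast_mcd as defined above: it returns the raw
# (uncorrected, unreweighted) robust estimates, and the branch taken depends
# on n_samples and n_features as described in the code. Toy data only.
import numpy as np
from sklearn.covariance import fast_mcd

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[0.8, 0.3], [0.3, 0.4]], size=400)
X[:20] += 5.0  # a handful of gross outliers
location, covariance, support, dist = fast_mcd(X, random_state=0)
print(int(support.sum()), "observations kept out of", len(X))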
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, the support of the robust location and the covariance + estimates is computed, and a covariance estimate is recomputed from + it, without centering the data. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, the robust location and covariance are directly computed + with the FastMCD algorithm without additional treatment. + + support_fraction : float, default=None + The proportion of points to be included in the support of the raw + MCD estimate. Default is None, which implies that the minimum + value of support_fraction will be used within the algorithm: + `(n_samples + n_features + 1) / 2 * n_samples`. The parameter must be + in the range (0, 1]. + + random_state : int, RandomState instance or None, default=None + Determines the pseudo random number generator for shuffling the data. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + raw_location_ : ndarray of shape (n_features,) + The raw robust estimated location before correction and re-weighting. + + raw_covariance_ : ndarray of shape (n_features, n_features) + The raw robust estimated covariance before correction and re-weighting. + + raw_support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute + the raw robust estimates of location and shape, before correction + and re-weighting. + + location_ : ndarray of shape (n_features,) + Estimated robust location. + + covariance_ : ndarray of shape (n_features, n_features) + Estimated robust covariance matrix. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + support_ : ndarray of shape (n_samples,) + A mask of the observations that have been used to compute + the robust estimates of location and shape. + + dist_ : ndarray of shape (n_samples,) + Mahalanobis distances of the training set (on which :meth:`fit` is + called) observations. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + LedoitWolf : LedoitWolf Estimator. + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + References + ---------- + + .. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression. + J. Am Stat Ass, 79:871, 1984. + .. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant + Estimator, 1999, American Statistical Association and the American + Society for Quality, TECHNOMETRICS + .. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun, + Asymptotics For The Minimum Covariance Determinant Estimator, + The Annals of Statistics, 1993, Vol. 21, No. 
3, 1385-1400 + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import MinCovDet + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=500) + >>> cov = MinCovDet(random_state=0).fit(X) + >>> cov.covariance_ + array([[0.7411..., 0.2535...], + [0.2535..., 0.3053...]]) + >>> cov.location_ + array([0.0813... , 0.0427...]) + """ + + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "support_fraction": [Interval(Real, 0, 1, closed="right"), None], + "random_state": ["random_state"], + } + _nonrobust_covariance = staticmethod(empirical_covariance) + + def __init__( + self, + *, + store_precision=True, + assume_centered=False, + support_fraction=None, + random_state=None, + ): + self.store_precision = store_precision + self.assume_centered = assume_centered + self.support_fraction = support_fraction + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit a Minimum Covariance Determinant with the FastMCD algorithm. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = validate_data(self, X, ensure_min_samples=2, estimator="MinCovDet") + random_state = check_random_state(self.random_state) + n_samples, n_features = X.shape + # check that the empirical covariance is full rank + if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features: + warnings.warn( + "The covariance matrix associated to your dataset is not full rank" + ) + # compute and store raw estimates + raw_location, raw_covariance, raw_support, raw_dist = fast_mcd( + X, + support_fraction=self.support_fraction, + cov_computation_method=self._nonrobust_covariance, + random_state=random_state, + ) + if self.assume_centered: + raw_location = np.zeros(n_features) + raw_covariance = self._nonrobust_covariance( + X[raw_support], assume_centered=True + ) + # get precision matrix in an optimized way + precision = linalg.pinvh(raw_covariance) + raw_dist = np.sum(np.dot(X, precision) * X, 1) + self.raw_location_ = raw_location + self.raw_covariance_ = raw_covariance + self.raw_support_ = raw_support + self.location_ = raw_location + self.support_ = raw_support + self.dist_ = raw_dist + # obtain consistency at normal models + self.correct_covariance(X) + # re-weight estimator + self.reweight_covariance(X) + + return self + + def correct_covariance(self, data): + """Apply a correction to raw Minimum Covariance Determinant estimates. + + Correction using the empirical correction factor suggested + by Rousseeuw and Van Driessen in [RVD]_. + + Parameters + ---------- + data : array-like of shape (n_samples, n_features) + The data matrix, with p features and n samples. + The data set must be the one which was used to compute + the raw estimates. + + Returns + ------- + covariance_corrected : ndarray of shape (n_features, n_features) + Corrected robust covariance estimate. + + References + ---------- + + .. 
[RVD] A Fast Algorithm for the Minimum Covariance + Determinant Estimator, 1999, American Statistical Association + and the American Society for Quality, TECHNOMETRICS + """ + + # Check that the covariance of the support data is not equal to 0. + # Otherwise self.dist_ = 0 and thus correction = 0. + n_samples = len(self.dist_) + n_support = np.sum(self.support_) + if n_support < n_samples and np.allclose(self.raw_covariance_, 0): + raise ValueError( + "The covariance matrix of the support data " + "is equal to 0, try to increase support_fraction" + ) + correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5) + covariance_corrected = self.raw_covariance_ * correction + self.dist_ /= correction + return covariance_corrected + + def reweight_covariance(self, data): + """Re-weight raw Minimum Covariance Determinant estimates. + + Re-weight observations using Rousseeuw's method (equivalent to + deleting outlying observations from the data set before + computing location and covariance estimates) described + in [RVDriessen]_. + + Parameters + ---------- + data : array-like of shape (n_samples, n_features) + The data matrix, with p features and n samples. + The data set must be the one which was used to compute + the raw estimates. + + Returns + ------- + location_reweighted : ndarray of shape (n_features,) + Re-weighted robust location estimate. + + covariance_reweighted : ndarray of shape (n_features, n_features) + Re-weighted robust covariance estimate. + + support_reweighted : ndarray of shape (n_samples,), dtype=bool + A mask of the observations that have been used to compute + the re-weighted robust location and covariance estimates. + + References + ---------- + + .. [RVDriessen] A Fast Algorithm for the Minimum Covariance + Determinant Estimator, 1999, American Statistical Association + and the American Society for Quality, TECHNOMETRICS + """ + n_samples, n_features = data.shape + mask = self.dist_ < chi2(n_features).isf(0.025) + if self.assume_centered: + location_reweighted = np.zeros(n_features) + else: + location_reweighted = data[mask].mean(0) + covariance_reweighted = self._nonrobust_covariance( + data[mask], assume_centered=self.assume_centered + ) + support_reweighted = np.zeros(n_samples, dtype=bool) + support_reweighted[mask] = True + self._set_covariance(covariance_reweighted) + self.location_ = location_reweighted + self.support_ = support_reweighted + X_centered = data - self.location_ + self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1) + return location_reweighted, covariance_reweighted, support_reweighted diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..ab875d83b30ec3b3a23ac7a89d01194518ccd957 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/_shrunk_covariance.py @@ -0,0 +1,820 @@ +""" +Covariance estimators using shrinkage. + +Shrinkage corresponds to regularising `cov` using a convex combination: +shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate. 
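# A compact sketch of the two post-processing steps implemented above,
# assuming raw MCD estimates (raw_covariance and the Mahalanobis distances
# dist) are already available for a data matrix X with p features. It mirrors
# the chi-square based correction and reweighting, not the full estimator.
import numpy as np
from scipy.stats import chi2
from sklearn.covariance import empirical_covariance

def correct_and_reweight(X, raw_covariance, dist):
    p = X.shape[1]
    # consistency correction at the normal model
    correction = np.median(dist) / chi2(p).isf(0.5)
    covariance_corrected = raw_covariance * correction
    dist_corrected = dist / correction
    # reweighting: drop points beyond the 97.5% chi-square quantile
    mask = dist_corrected < chi2(p).isf(0.025)
    location_rw = X[mask].mean(axis=0)
    covariance_rw = empirical_covariance(X[mask])
    return covariance_corrected, location_rw, covariance_rw, mask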
+ +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +# avoid division truncation +import warnings +from numbers import Integral, Real + +import numpy as np + +from ..base import _fit_context +from ..utils import check_array +from ..utils._param_validation import Interval, validate_params +from ..utils.validation import validate_data +from . import EmpiricalCovariance, empirical_covariance + + +def _ledoit_wolf(X, *, assume_centered, block_size): + """Estimate the shrunk Ledoit-Wolf covariance matrix.""" + # for only one feature, the result is the same whatever the shrinkage + if len(X.shape) == 2 and X.shape[1] == 1: + if not assume_centered: + X = X - X.mean() + return np.atleast_2d((X**2).mean()), 0.0 + n_features = X.shape[1] + + # get Ledoit-Wolf shrinkage + shrinkage = ledoit_wolf_shrinkage( + X, assume_centered=assume_centered, block_size=block_size + ) + emp_cov = empirical_covariance(X, assume_centered=assume_centered) + mu = np.sum(np.trace(emp_cov)) / n_features + shrunk_cov = (1.0 - shrinkage) * emp_cov + shrunk_cov.flat[:: n_features + 1] += shrinkage * mu + + return shrunk_cov, shrinkage + + +def _oas(X, *, assume_centered=False): + """Estimate covariance with the Oracle Approximating Shrinkage algorithm. + + The formulation is based on [1]_. + [1] "Shrinkage algorithms for MMSE covariance estimation.", + Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. + IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. + https://arxiv.org/pdf/0907.4698.pdf + """ + if len(X.shape) == 2 and X.shape[1] == 1: + # for only one feature, the result is the same whatever the shrinkage + if not assume_centered: + X = X - X.mean() + return np.atleast_2d((X**2).mean()), 0.0 + + n_samples, n_features = X.shape + + emp_cov = empirical_covariance(X, assume_centered=assume_centered) + + # The shrinkage is defined as: + # shrinkage = min( + # trace(S @ S.T) + trace(S)**2) / ((n + 1) (trace(S @ S.T) - trace(S)**2 / p), 1 + # ) + # where n and p are n_samples and n_features, respectively (cf. Eq. 23 in [1]). + # The factor 2 / p is omitted since it does not impact the value of the estimator + # for large p. + + # Instead of computing trace(S)**2, we can compute the average of the squared + # elements of S that is equal to trace(S)**2 / p**2. + # See the definition of the Frobenius norm: + # https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm + alpha = np.mean(emp_cov**2) + mu = np.trace(emp_cov) / n_features + mu_squared = mu**2 + + # The factor 1 / p**2 will cancel out since it is in both the numerator and + # denominator + num = alpha + mu_squared + den = (n_samples + 1) * (alpha - mu_squared / n_features) + shrinkage = 1.0 if den == 0 else min(num / den, 1.0) + + # The shrunk covariance is defined as: + # (1 - shrinkage) * S + shrinkage * F (cf. Eq. 4 in [1]) + # where S is the empirical covariance and F is the shrinkage target defined as + # F = trace(S) / n_features * np.identity(n_features) (cf. Eq. 3 in [1]) + shrunk_cov = (1.0 - shrinkage) * emp_cov + shrunk_cov.flat[:: n_features + 1] += shrinkage * mu + + return shrunk_cov, shrinkage + + +############################################################################### +# Public API +# ShrunkCovariance estimator + + +@validate_params( + { + "emp_cov": ["array-like"], + "shrinkage": [Interval(Real, 0, 1, closed="both")], + }, + prefer_skip_nested_validation=True, +) +def shrunk_covariance(emp_cov, shrinkage=0.1): + """Calculate covariance matrices shrunk on the diagonal. 
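# A worked restatement of the OAS shrinkage computed in _oas above, assuming
# S is the empirical covariance of n_samples already-centered observations.
# Purely illustrative; the estimator classes handle centering and validation.
import numpy as np

def oas_shrinkage(S, n_samples):
    n_features = S.shape[0]
    alpha = np.mean(S ** 2)          # proportional to trace(S @ S.T)
    mu = np.trace(S) / n_features    # average eigenvalue of S
    num = alpha + mu ** 2
    den = (n_samples + 1) * (alpha - mu ** 2 / n_features)
    return 1.0 if den == 0 else min(num / den, 1.0)

def oas_estimate(S, n_samples):
    n_features = S.shape[0]
    s = oas_shrinkage(S, n_samples)
    mu = np.trace(S) / n_features
    shrunk = (1.0 - s) * S
    shrunk.flat[:: n_features + 1] += s * mu   # shrink towards mu * identity
    return shrunk, s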
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + emp_cov : array-like of shape (..., n_features, n_features) + Covariance matrices to be shrunk, at least 2D ndarray. + + shrinkage : float, default=0.1 + Coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + Returns + ------- + shrunk_cov : ndarray of shape (..., n_features, n_features) + Shrunk covariance matrices. + + Notes + ----- + The regularized (shrunk) covariance is given by:: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where `mu = trace(cov) / n_features`. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.datasets import make_gaussian_quantiles + >>> from sklearn.covariance import empirical_covariance, shrunk_covariance + >>> real_cov = np.array([[.8, .3], [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500) + >>> shrunk_covariance(empirical_covariance(X)) + array([[0.73..., 0.25...], + [0.25..., 0.41...]]) + """ + emp_cov = check_array(emp_cov, allow_nd=True) + n_features = emp_cov.shape[-1] + + shrunk_cov = (1.0 - shrinkage) * emp_cov + mu = np.trace(emp_cov, axis1=-2, axis2=-1) / n_features + mu = np.expand_dims(mu, axis=tuple(range(mu.ndim, emp_cov.ndim))) + shrunk_cov += shrinkage * mu * np.eye(n_features) + + return shrunk_cov + + +class ShrunkCovariance(EmpiricalCovariance): + """Covariance estimator with shrinkage. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data will be centered before computation. + + shrinkage : float, default=0.1 + Coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + Attributes + ---------- + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix + + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. 
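# A small sketch of the batched behaviour documented above: shrunk_covariance
# accepts a stack of covariance matrices of shape (..., n_features, n_features)
# and shrinks each one towards its own scaled identity, preserving the trace.
# Toy matrices only.
import numpy as np
from sklearn.covariance import shrunk_covariance

covs = np.stack([np.eye(3), np.diag([3.0, 2.0, 1.0])])   # shape (2, 3, 3)
shrunk = shrunk_covariance(covs, shrinkage=0.5)
print(shrunk.shape)                            # (2, 3, 3)
print(np.trace(shrunk[1]), np.trace(covs[1]))  # both 6.0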
+ + Notes + ----- + The regularized covariance is given by: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import ShrunkCovariance + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=500) + >>> cov = ShrunkCovariance().fit(X) + >>> cov.covariance_ + array([[0.7387..., 0.2536...], + [0.2536..., 0.4110...]]) + >>> cov.location_ + array([0.0622..., 0.0193...]) + """ + + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "shrinkage": [Interval(Real, 0, 1, closed="both")], + } + + def __init__(self, *, store_precision=True, assume_centered=False, shrinkage=0.1): + super().__init__( + store_precision=store_precision, assume_centered=assume_centered + ) + self.shrinkage = shrinkage + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the shrunk covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = validate_data(self, X) + # Not calling the parent object to fit, to avoid a potential + # matrix inversion when setting the precision + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance = empirical_covariance(X, assume_centered=self.assume_centered) + covariance = shrunk_covariance(covariance, self.shrinkage) + self._set_covariance(covariance) + + return self + + +# Ledoit-Wolf estimator + + +@validate_params( + { + "X": ["array-like"], + "assume_centered": ["boolean"], + "block_size": [Interval(Integral, 1, None, closed="left")], + }, + prefer_skip_nested_validation=True, +) +def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000): + """Estimate the shrunk Ledoit-Wolf covariance matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, data will be centered before computation. + + block_size : int, default=1000 + Size of blocks into which the covariance matrix will be split. + + Returns + ------- + shrinkage : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. + + Notes + ----- + The regularized (shrunk) covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import ledoit_wolf_shrinkage + >>> real_cov = np.array([[.4, .2], [.2, .8]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) + >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X) + >>> shrinkage_coefficient + np.float64(0.23...) 
+ """ + X = check_array(X) + # for only one feature, the result is the same whatever the shrinkage + if len(X.shape) == 2 and X.shape[1] == 1: + return 0.0 + if X.ndim == 1: + X = np.reshape(X, (1, -1)) + + if X.shape[0] == 1: + warnings.warn( + "Only one sample available. You may want to reshape your data array" + ) + n_samples, n_features = X.shape + + # optionally center data + if not assume_centered: + X = X - X.mean(0) + + # A non-blocked version of the computation is present in the tests + # in tests/test_covariance.py + + # number of blocks to split the covariance matrix into + n_splits = int(n_features / block_size) + X2 = X**2 + emp_cov_trace = np.sum(X2, axis=0) / n_samples + mu = np.sum(emp_cov_trace) / n_features + beta_ = 0.0 # sum of the coefficients of + delta_ = 0.0 # sum of the *squared* coefficients of + # starting block computation + for i in range(n_splits): + for j in range(n_splits): + rows = slice(block_size * i, block_size * (i + 1)) + cols = slice(block_size * j, block_size * (j + 1)) + beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols])) + delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2) + rows = slice(block_size * i, block_size * (i + 1)) + beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits :])) + delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits :]) ** 2) + for j in range(n_splits): + cols = slice(block_size * j, block_size * (j + 1)) + beta_ += np.sum(np.dot(X2.T[block_size * n_splits :], X2[:, cols])) + delta_ += np.sum(np.dot(X.T[block_size * n_splits :], X[:, cols]) ** 2) + delta_ += np.sum( + np.dot(X.T[block_size * n_splits :], X[:, block_size * n_splits :]) ** 2 + ) + delta_ /= n_samples**2 + beta_ += np.sum( + np.dot(X2.T[block_size * n_splits :], X2[:, block_size * n_splits :]) + ) + # use delta_ to compute beta + beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_) + # delta is the sum of the squared coefficients of ( - mu*Id) / p + delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2 + delta /= n_features + # get final beta as the min between beta and delta + # We do this to prevent shrinking more than "1", which would invert + # the value of covariances + beta = min(beta, delta) + # finally get shrinkage + shrinkage = 0 if beta == 0 else beta / delta + return shrinkage + + +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def ledoit_wolf(X, *, assume_centered=False, block_size=1000): + """Estimate the shrunk Ledoit-Wolf covariance matrix. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, data will be centered before computation. + + block_size : int, default=1000 + Size of blocks into which the covariance matrix will be split. + This is purely a memory optimization and does not affect results. + + Returns + ------- + shrunk_cov : ndarray of shape (n_features, n_features) + Shrunk covariance. + + shrinkage : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. 
+ + Notes + ----- + The regularized (shrunk) covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import empirical_covariance, ledoit_wolf + >>> real_cov = np.array([[.4, .2], [.2, .8]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50) + >>> covariance, shrinkage = ledoit_wolf(X) + >>> covariance + array([[0.44..., 0.16...], + [0.16..., 0.80...]]) + >>> shrinkage + np.float64(0.23...) + """ + estimator = LedoitWolf( + assume_centered=assume_centered, + block_size=block_size, + store_precision=False, + ).fit(X) + + return estimator.covariance_, estimator.shrinkage_ + + +class LedoitWolf(EmpiricalCovariance): + """LedoitWolf Estimator. + + Ledoit-Wolf is a particular form of shrinkage, where the shrinkage + coefficient is computed using O. Ledoit and M. Wolf's formula as + described in "A Well-Conditioned Estimator for Large-Dimensional + Covariance Matrices", Ledoit and Wolf, Journal of Multivariate + Analysis, Volume 88, Issue 2, February 2004, pages 365-411. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data will be centered before computation. + + block_size : int, default=1000 + Size of blocks into which the covariance matrix will be split + during its Ledoit-Wolf estimation. This is purely a memory + optimization and does not affect results. + + Attributes + ---------- + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + shrinkage_ : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + OAS : Oracle Approximating Shrinkage Estimator. + ShrunkCovariance : Covariance estimator with shrinkage. + + Notes + ----- + The regularised covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features) + + where mu = trace(cov) / n_features + and shrinkage is given by the Ledoit and Wolf formula (see References) + + References + ---------- + "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices", + Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2, + February 2004, pages 365-411. 
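# A non-blocked restatement of the shrinkage computation in
# ledoit_wolf_shrinkage above (the blocked loops only save memory), assuming
# X is already centered, followed by a small check that block_size does not
# change the result. Illustrative only.
import numpy as np
from sklearn.covariance import ledoit_wolf_shrinkage

def naive_ledoit_wolf_shrinkage(X):
    n_samples, n_features = X.shape
    X2 = X ** 2
    emp_cov_trace = X2.sum(axis=0) / n_samples
    mu = emp_cov_trace.sum() / n_features
    beta_ = np.sum(X2.T @ X2)
    delta_ = np.sum((X.T @ X) ** 2) / n_samples ** 2
    beta = (beta_ / n_samples - delta_) / (n_features * n_samples)
    delta = (delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu ** 2) / n_features
    beta = min(beta, delta)
    return 0.0 if beta == 0 else beta / delta

rng = np.random.RandomState(0)
X = rng.randn(50, 4)
X -= X.mean(axis=0)
assert np.isclose(naive_ledoit_wolf_shrinkage(X),
                  ledoit_wolf_shrinkage(X, assume_centered=True))
assert np.isclose(ledoit_wolf_shrinkage(X, assume_centered=True, block_size=1),
                  ledoit_wolf_shrinkage(X, assume_centered=True))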
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import LedoitWolf + >>> real_cov = np.array([[.4, .2], + ... [.2, .8]]) + >>> np.random.seed(0) + >>> X = np.random.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... size=50) + >>> cov = LedoitWolf().fit(X) + >>> cov.covariance_ + array([[0.4406..., 0.1616...], + [0.1616..., 0.8022...]]) + >>> cov.location_ + array([ 0.0595... , -0.0075...]) + + See also :ref:`sphx_glr_auto_examples_covariance_plot_covariance_estimation.py` + for a more detailed example. + """ + + _parameter_constraints: dict = { + **EmpiricalCovariance._parameter_constraints, + "block_size": [Interval(Integral, 1, None, closed="left")], + } + + def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000): + super().__init__( + store_precision=store_precision, assume_centered=assume_centered + ) + self.block_size = block_size + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the Ledoit-Wolf shrunk covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + # Not calling the parent object to fit, to avoid computing the + # covariance matrix (and potentially the precision) + X = validate_data(self, X) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance, shrinkage = _ledoit_wolf( + X - self.location_, assume_centered=True, block_size=self.block_size + ) + self.shrinkage_ = shrinkage + self._set_covariance(covariance) + + return self + + +# OAS estimator +@validate_params( + {"X": ["array-like"]}, + prefer_skip_nested_validation=False, +) +def oas(X, *, assume_centered=False): + """Estimate covariance with the Oracle Approximating Shrinkage. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Data from which to compute the covariance estimate. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful to work with data whose mean is significantly equal to + zero but is not exactly zero. + If False, data will be centered before computation. + + Returns + ------- + shrunk_cov : array-like of shape (n_features, n_features) + Shrunk covariance. + + shrinkage : float + Coefficient in the convex combination used for the computation + of the shrunk estimate. + + Notes + ----- + The regularised covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features), + + where mu = trace(cov) / n_features and shrinkage is given by the OAS formula + (see [1]_). + + The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In + the original article, formula (23) states that 2/p (p being the number of + features) is multiplied by Trace(cov*cov) in both the numerator and + denominator, but this operation is omitted because for a large p, the value + of 2/p is so small that it doesn't affect the value of the estimator. + + References + ---------- + .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.", + Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. + IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. 
+ <0907.4698>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import oas + >>> rng = np.random.RandomState(0) + >>> real_cov = [[.8, .3], [.3, .4]] + >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500) + >>> shrunk_cov, shrinkage = oas(X) + >>> shrunk_cov + array([[0.7533..., 0.2763...], + [0.2763..., 0.3964...]]) + >>> shrinkage + np.float64(0.0195...) + """ + estimator = OAS( + assume_centered=assume_centered, + ).fit(X) + return estimator.covariance_, estimator.shrinkage_ + + +class OAS(EmpiricalCovariance): + """Oracle Approximating Shrinkage Estimator. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool, default=True + Specify if the estimated precision is stored. + + assume_centered : bool, default=False + If True, data will not be centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data will be centered before computation. + + Attributes + ---------- + covariance_ : ndarray of shape (n_features, n_features) + Estimated covariance matrix. + + location_ : ndarray of shape (n_features,) + Estimated location, i.e. the estimated mean. + + precision_ : ndarray of shape (n_features, n_features) + Estimated pseudo inverse matrix. + (stored only if store_precision is True) + + shrinkage_ : float + coefficient in the convex combination used for the computation + of the shrunk estimate. Range is [0, 1]. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + EllipticEnvelope : An object for detecting outliers in + a Gaussian distributed dataset. + EmpiricalCovariance : Maximum likelihood covariance estimator. + GraphicalLasso : Sparse inverse covariance estimation + with an l1-penalized estimator. + GraphicalLassoCV : Sparse inverse covariance with cross-validated + choice of the l1 penalty. + LedoitWolf : LedoitWolf Estimator. + MinCovDet : Minimum Covariance Determinant + (robust estimator of covariance). + ShrunkCovariance : Covariance estimator with shrinkage. + + Notes + ----- + The regularised covariance is: + + (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features), + + where mu = trace(cov) / n_features and shrinkage is given by the OAS formula + (see [1]_). + + The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In + the original article, formula (23) states that 2/p (p being the number of + features) is multiplied by Trace(cov*cov) in both the numerator and + denominator, but this operation is omitted because for a large p, the value + of 2/p is so small that it doesn't affect the value of the estimator. + + References + ---------- + .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.", + Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O. + IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010. + <0907.4698>` + + Examples + -------- + >>> import numpy as np + >>> from sklearn.covariance import OAS + >>> from sklearn.datasets import make_gaussian_quantiles + >>> real_cov = np.array([[.8, .3], + ... [.3, .4]]) + >>> rng = np.random.RandomState(0) + >>> X = rng.multivariate_normal(mean=[0, 0], + ... cov=real_cov, + ... 
size=500) + >>> oas = OAS().fit(X) + >>> oas.covariance_ + array([[0.7533..., 0.2763...], + [0.2763..., 0.3964...]]) + >>> oas.precision_ + array([[ 1.7833..., -1.2431... ], + [-1.2431..., 3.3889...]]) + >>> oas.shrinkage_ + np.float64(0.0195...) + + See also :ref:`sphx_glr_auto_examples_covariance_plot_covariance_estimation.py` + for a more detailed example. + """ + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None): + """Fit the Oracle Approximating Shrinkage covariance model to X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data, where `n_samples` is the number of samples + and `n_features` is the number of features. + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + Returns the instance itself. + """ + X = validate_data(self, X) + # Not calling the parent object to fit, to avoid computing the + # covariance matrix (and potentially the precision) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + + covariance, shrinkage = _oas(X - self.location_, assume_centered=True) + self.shrinkage_ = shrinkage + self._set_covariance(covariance) + + return self diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5a7f966bb21c126c59162c0dfeca39ac234d102 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ff4ddd20a9bfb900e62b1fb1d85c32dcc138bd0 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_covariance.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..297d7257feda68ae091f33e11947ed959c056753 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_elliptic_envelope.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45c1a81c9f97bb29decfa7234faca83bc9d93478 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_graphical_lasso.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..936c3f12c09c7a4b5911b5e4dc995cf7e1175b0d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/__pycache__/test_robust_covariance.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..9c55012c158e19df20e4c4770867fc19398213d0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_covariance.py @@ -0,0 +1,374 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.covariance import ( + OAS, + EmpiricalCovariance, + LedoitWolf, + ShrunkCovariance, + empirical_covariance, + ledoit_wolf, + ledoit_wolf_shrinkage, + oas, + shrunk_covariance, +) +from sklearn.covariance._shrunk_covariance import _ledoit_wolf +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + +from .._shrunk_covariance import _oas + +X, _ = datasets.load_diabetes(return_X_y=True) +X_1d = X[:, 0] +n_samples, n_features = X.shape + + +def test_covariance(): + # Tests Covariance module on a simple dataset. + # test covariance fit from data + cov = EmpiricalCovariance() + cov.fit(X) + emp_cov = empirical_covariance(X) + assert_array_almost_equal(emp_cov, cov.covariance_, 4) + assert_almost_equal(cov.error_norm(emp_cov), 0) + assert_almost_equal(cov.error_norm(emp_cov, norm="spectral"), 0) + assert_almost_equal(cov.error_norm(emp_cov, norm="frobenius"), 0) + assert_almost_equal(cov.error_norm(emp_cov, scaling=False), 0) + assert_almost_equal(cov.error_norm(emp_cov, squared=False), 0) + with pytest.raises(NotImplementedError): + cov.error_norm(emp_cov, norm="foo") + # Mahalanobis distances computation test + mahal_dist = cov.mahalanobis(X) + assert np.amin(mahal_dist) > 0 + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + cov = EmpiricalCovariance() + cov.fit(X_1d) + assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4) + assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0) + assert_almost_equal(cov.error_norm(empirical_covariance(X_1d), norm="spectral"), 0) + + # test with one sample + # Create X with 1 sample and 5 features + X_1sample = np.arange(5).reshape(1, 5) + cov = EmpiricalCovariance() + warn_msg = "Only one sample available. 
You may want to reshape your data array" + with pytest.warns(UserWarning, match=warn_msg): + cov.fit(X_1sample) + + assert_array_almost_equal(cov.covariance_, np.zeros(shape=(5, 5), dtype=np.float64)) + + # test integer type + X_integer = np.asarray([[0, 1], [1, 0]]) + result = np.asarray([[0.25, -0.25], [-0.25, 0.25]]) + assert_array_almost_equal(empirical_covariance(X_integer), result) + + # test centered case + cov = EmpiricalCovariance(assume_centered=True) + cov.fit(X) + assert_array_equal(cov.location_, np.zeros(X.shape[1])) + + +@pytest.mark.parametrize("n_matrices", [1, 3]) +def test_shrunk_covariance_func(n_matrices): + """Check `shrunk_covariance` function.""" + + n_features = 2 + cov = np.ones((n_features, n_features)) + cov_target = np.array([[1, 0.5], [0.5, 1]]) + + if n_matrices > 1: + cov = np.repeat(cov[np.newaxis, ...], n_matrices, axis=0) + cov_target = np.repeat(cov_target[np.newaxis, ...], n_matrices, axis=0) + + cov_shrunk = shrunk_covariance(cov, 0.5) + assert_allclose(cov_shrunk, cov_target) + + +def test_shrunk_covariance(): + """Check consistency between `ShrunkCovariance` and `shrunk_covariance`.""" + + # Tests ShrunkCovariance module on a simple dataset. + # compare shrunk covariance obtained from data and from MLE estimate + cov = ShrunkCovariance(shrinkage=0.5) + cov.fit(X) + assert_array_almost_equal( + shrunk_covariance(empirical_covariance(X), shrinkage=0.5), cov.covariance_, 4 + ) + + # same test with shrinkage not provided + cov = ShrunkCovariance() + cov.fit(X) + assert_array_almost_equal( + shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4 + ) + + # same test with shrinkage = 0 (<==> empirical_covariance) + cov = ShrunkCovariance(shrinkage=0.0) + cov.fit(X) + assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + cov = ShrunkCovariance(shrinkage=0.3) + cov.fit(X_1d) + assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4) + + # test shrinkage coeff on a simple data set (without saving precision) + cov = ShrunkCovariance(shrinkage=0.5, store_precision=False) + cov.fit(X) + assert cov.precision_ is None + + +def test_ledoit_wolf(): + # Tests LedoitWolf module on a simple dataset. 
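    # A minimal sketch with an arbitrary toy array: the ledoit_wolf() helper
    # wraps the LedoitWolf estimator, so both are expected to report the same
    # covariance and shrinkage for the same input.
    toy = np.arange(12.0).reshape(6, 2)
    toy_cov, toy_shrinkage = ledoit_wolf(toy)
    toy_lw = LedoitWolf().fit(toy)
    assert_array_almost_equal(toy_cov, toy_lw.covariance_)
    assert_almost_equal(toy_shrinkage, toy_lw.shrinkage_)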
+ # test shrinkage coeff on a simple data set + X_centered = X - X.mean(axis=0) + lw = LedoitWolf(assume_centered=True) + lw.fit(X_centered) + shrinkage_ = lw.shrinkage_ + + score_ = lw.score(X_centered) + assert_almost_equal( + ledoit_wolf_shrinkage(X_centered, assume_centered=True), shrinkage_ + ) + assert_almost_equal( + ledoit_wolf_shrinkage(X_centered, assume_centered=True, block_size=6), + shrinkage_, + ) + # compare shrunk covariance obtained from data and from MLE estimate + lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf( + X_centered, assume_centered=True + ) + assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4) + assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_) + # compare estimates given by LW and ShrunkCovariance + scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True) + scov.fit(X_centered) + assert_array_almost_equal(scov.covariance_, lw.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + lw = LedoitWolf(assume_centered=True) + lw.fit(X_1d) + lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d, assume_centered=True) + assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4) + assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_) + assert_array_almost_equal((X_1d**2).sum() / n_samples, lw.covariance_, 4) + + # test shrinkage coeff on a simple data set (without saving precision) + lw = LedoitWolf(store_precision=False, assume_centered=True) + lw.fit(X_centered) + assert_almost_equal(lw.score(X_centered), score_, 4) + assert lw.precision_ is None + + # Same tests without assuming centered data + # test shrinkage coeff on a simple data set + lw = LedoitWolf() + lw.fit(X) + assert_almost_equal(lw.shrinkage_, shrinkage_, 4) + assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X)) + assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1]) + assert_almost_equal( + lw.shrinkage_, _ledoit_wolf(X=X, assume_centered=False, block_size=10000)[1] + ) + assert_almost_equal(lw.score(X), score_, 4) + # compare shrunk covariance obtained from data and from MLE estimate + lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X) + assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4) + assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_) + # compare estimates given by LW and ShrunkCovariance + scov = ShrunkCovariance(shrinkage=lw.shrinkage_) + scov.fit(X) + assert_array_almost_equal(scov.covariance_, lw.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + lw = LedoitWolf() + lw.fit(X_1d) + assert_allclose( + X_1d.var(ddof=0), + _ledoit_wolf(X=X_1d, assume_centered=False, block_size=10000)[0], + ) + lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d) + assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4) + assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_) + assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4) + + # test with one sample + # warning should be raised when using only 1 sample + X_1sample = np.arange(5).reshape(1, 5) + lw = LedoitWolf() + + warn_msg = "Only one sample available. 
You may want to reshape your data array" + with pytest.warns(UserWarning, match=warn_msg): + lw.fit(X_1sample) + + assert_array_almost_equal(lw.covariance_, np.zeros(shape=(5, 5), dtype=np.float64)) + + # test shrinkage coeff on a simple data set (without saving precision) + lw = LedoitWolf(store_precision=False) + lw.fit(X) + assert_almost_equal(lw.score(X), score_, 4) + assert lw.precision_ is None + + +def _naive_ledoit_wolf_shrinkage(X): + # A simple implementation of the formulas from Ledoit & Wolf + + # The computation below achieves the following computations of the + # "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for + # Large-Dimensional Covariance Matrices" + # beta and delta are given in the beginning of section 3.2 + n_samples, n_features = X.shape + emp_cov = empirical_covariance(X, assume_centered=False) + mu = np.trace(emp_cov) / n_features + delta_ = emp_cov.copy() + delta_.flat[:: n_features + 1] -= mu + delta = (delta_**2).sum() / n_features + X2 = X**2 + beta_ = ( + 1.0 + / (n_features * n_samples) + * np.sum(np.dot(X2.T, X2) / n_samples - emp_cov**2) + ) + + beta = min(beta_, delta) + shrinkage = beta / delta + return shrinkage + + +def test_ledoit_wolf_small(): + # Compare our blocked implementation to the naive implementation + X_small = X[:, :4] + lw = LedoitWolf() + lw.fit(X_small) + shrinkage_ = lw.shrinkage_ + + assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small)) + + +def test_ledoit_wolf_large(): + # test that ledoit_wolf doesn't error on data that is wider than block_size + rng = np.random.RandomState(0) + # use a number of features that is larger than the block-size + X = rng.normal(size=(10, 20)) + lw = LedoitWolf(block_size=10).fit(X) + # check that covariance is about diagonal (random normal noise) + assert_almost_equal(lw.covariance_, np.eye(20), 0) + cov = lw.covariance_ + + # check that the result is consistent with not splitting data into blocks. + lw = LedoitWolf(block_size=25).fit(X) + assert_almost_equal(lw.covariance_, cov) + + +@pytest.mark.parametrize( + "ledoit_wolf_fitting_function", [LedoitWolf().fit, ledoit_wolf_shrinkage] +) +def test_ledoit_wolf_empty_array(ledoit_wolf_fitting_function): + """Check that we validate X and raise proper error with 0-sample array.""" + X_empty = np.zeros((0, 2)) + with pytest.raises(ValueError, match="Found array with 0 sample"): + ledoit_wolf_fitting_function(X_empty) + + +def test_oas(): + # Tests OAS module on a simple dataset. 
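    # A minimal sketch with an arbitrary toy array: the fitted OAS covariance
    # is the convex combination (1 - shrinkage) * emp_cov + shrinkage * mu * I,
    # which is what shrunk_covariance() computes at the same shrinkage value.
    toy = np.arange(12.0).reshape(6, 2)
    toy_oas = OAS().fit(toy)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(toy), toy_oas.shrinkage_),
        toy_oas.covariance_,
    )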
+ # test shrinkage coeff on a simple data set + X_centered = X - X.mean(axis=0) + oa = OAS(assume_centered=True) + oa.fit(X_centered) + shrinkage_ = oa.shrinkage_ + score_ = oa.score(X_centered) + # compare shrunk covariance obtained from data and from MLE estimate + oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered, assume_centered=True) + assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_) + # compare estimates given by OAS and ShrunkCovariance + scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True) + scov.fit(X_centered) + assert_array_almost_equal(scov.covariance_, oa.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0:1] + oa = OAS(assume_centered=True) + oa.fit(X_1d) + oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True) + assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_) + assert_array_almost_equal((X_1d**2).sum() / n_samples, oa.covariance_, 4) + + # test shrinkage coeff on a simple data set (without saving precision) + oa = OAS(store_precision=False, assume_centered=True) + oa.fit(X_centered) + assert_almost_equal(oa.score(X_centered), score_, 4) + assert oa.precision_ is None + + # Same tests without assuming centered data-------------------------------- + # test shrinkage coeff on a simple data set + oa = OAS() + oa.fit(X) + assert_almost_equal(oa.shrinkage_, shrinkage_, 4) + assert_almost_equal(oa.score(X), score_, 4) + # compare shrunk covariance obtained from data and from MLE estimate + oa_cov_from_mle, oa_shrinkage_from_mle = oas(X) + assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_) + # compare estimates given by OAS and ShrunkCovariance + scov = ShrunkCovariance(shrinkage=oa.shrinkage_) + scov.fit(X) + assert_array_almost_equal(scov.covariance_, oa.covariance_, 4) + + # test with n_features = 1 + X_1d = X[:, 0].reshape((-1, 1)) + oa = OAS() + oa.fit(X_1d) + oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d) + assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_) + assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4) + + # test with one sample + # warning should be raised when using only 1 sample + X_1sample = np.arange(5).reshape(1, 5) + oa = OAS() + warn_msg = "Only one sample available. 
You may want to reshape your data array" + with pytest.warns(UserWarning, match=warn_msg): + oa.fit(X_1sample) + + assert_array_almost_equal(oa.covariance_, np.zeros(shape=(5, 5), dtype=np.float64)) + + # test shrinkage coeff on a simple data set (without saving precision) + oa = OAS(store_precision=False) + oa.fit(X) + assert_almost_equal(oa.score(X), score_, 4) + assert oa.precision_ is None + + # test function _oas without assuming centered data + X_1f = X[:, 0:1] + oa = OAS() + oa.fit(X_1f) + # compare shrunk covariance obtained from data and from MLE estimate + _oa_cov_from_mle, _oa_shrinkage_from_mle = _oas(X_1f) + assert_array_almost_equal(_oa_cov_from_mle, oa.covariance_, 4) + assert_almost_equal(_oa_shrinkage_from_mle, oa.shrinkage_) + assert_array_almost_equal((X_1f**2).sum() / n_samples, oa.covariance_, 4) + + +def test_EmpiricalCovariance_validates_mahalanobis(): + """Checks that EmpiricalCovariance validates data with mahalanobis.""" + cov = EmpiricalCovariance().fit(X) + + msg = f"X has 2 features, but \\w+ is expecting {X.shape[1]} features as input" + with pytest.raises(ValueError, match=msg): + cov.mahalanobis(X[:, :2]) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py new file mode 100644 index 0000000000000000000000000000000000000000..ca85717fb378243ff8dcb75db1adade9a6c50c18 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_elliptic_envelope.py @@ -0,0 +1,52 @@ +""" +Testing for Elliptic Envelope algorithm (sklearn.covariance.elliptic_envelope). +""" + +import numpy as np +import pytest + +from sklearn.covariance import EllipticEnvelope +from sklearn.exceptions import NotFittedError +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) + + +def test_elliptic_envelope(global_random_seed): + rnd = np.random.RandomState(global_random_seed) + X = rnd.randn(100, 10) + clf = EllipticEnvelope(contamination=0.1) + with pytest.raises(NotFittedError): + clf.predict(X) + with pytest.raises(NotFittedError): + clf.decision_function(X) + clf.fit(X) + y_pred = clf.predict(X) + scores = clf.score_samples(X) + decisions = clf.decision_function(X) + + assert_array_almost_equal(scores, -clf.mahalanobis(X)) + assert_array_almost_equal(clf.mahalanobis(X), clf.dist_) + assert_almost_equal( + clf.score(X, np.ones(100)), (100 - y_pred[y_pred == -1].size) / 100.0 + ) + assert sum(y_pred == -1) == sum(decisions < 0) + + +def test_score_samples(): + X_train = [[1, 1], [1, 2], [2, 1]] + clf1 = EllipticEnvelope(contamination=0.2).fit(X_train) + clf2 = EllipticEnvelope().fit(X_train) + assert_array_equal( + clf1.score_samples([[2.0, 2.0]]), + clf1.decision_function([[2.0, 2.0]]) + clf1.offset_, + ) + assert_array_equal( + clf2.score_samples([[2.0, 2.0]]), + clf2.decision_function([[2.0, 2.0]]) + clf2.offset_, + ) + assert_array_equal( + clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]]) + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py new file mode 100644 index 0000000000000000000000000000000000000000..9698b64bf4407e216229b9e55fa4cd19896af823 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_graphical_lasso.py @@ -0,0 +1,318 @@ +"""Test 
the graphical_lasso module.""" + +import sys +from io import StringIO + +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy import linalg + +from sklearn import config_context, datasets +from sklearn.covariance import ( + GraphicalLasso, + GraphicalLassoCV, + empirical_covariance, + graphical_lasso, +) +from sklearn.datasets import make_sparse_spd_matrix +from sklearn.model_selection import GroupKFold +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + _convert_container, + assert_array_almost_equal, + assert_array_less, +) + + +def test_graphical_lassos(random_state=1): + """Test the graphical lasso solvers. + + This checks is unstable for some random seeds where the covariance found with "cd" + and "lars" solvers are different (4 cases / 100 tries). + """ + # Sample data from a sparse multivariate normal + dim = 20 + n_samples = 100 + random_state = check_random_state(random_state) + prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state) + cov = linalg.inv(prec) + X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples) + emp_cov = empirical_covariance(X) + + for alpha in (0.0, 0.1, 0.25): + covs = dict() + icovs = dict() + for method in ("cd", "lars"): + cov_, icov_, costs = graphical_lasso( + emp_cov, return_costs=True, alpha=alpha, mode=method + ) + covs[method] = cov_ + icovs[method] = icov_ + costs, dual_gap = np.array(costs).T + # Check that the costs always decrease (doesn't hold if alpha == 0) + if not alpha == 0: + # use 1e-12 since the cost can be exactly 0 + assert_array_less(np.diff(costs), 1e-12) + # Check that the 2 approaches give similar results + assert_allclose(covs["cd"], covs["lars"], atol=5e-4) + assert_allclose(icovs["cd"], icovs["lars"], atol=5e-4) + + # Smoke test the estimator + model = GraphicalLasso(alpha=0.25).fit(X) + model.score(X) + assert_array_almost_equal(model.covariance_, covs["cd"], decimal=4) + assert_array_almost_equal(model.covariance_, covs["lars"], decimal=4) + + # For a centered matrix, assume_centered could be chosen True or False + # Check that this returns indeed the same result for centered data + Z = X - X.mean(0) + precs = list() + for assume_centered in (False, True): + prec_ = GraphicalLasso(assume_centered=assume_centered).fit(Z).precision_ + precs.append(prec_) + assert_array_almost_equal(precs[0], precs[1]) + + +def test_graphical_lasso_when_alpha_equals_0(): + """Test graphical_lasso's early return condition when alpha=0.""" + X = np.random.randn(100, 10) + emp_cov = empirical_covariance(X, assume_centered=True) + + model = GraphicalLasso(alpha=0, covariance="precomputed").fit(emp_cov) + assert_allclose(model.precision_, np.linalg.inv(emp_cov)) + + _, precision = graphical_lasso(emp_cov, alpha=0) + assert_allclose(precision, np.linalg.inv(emp_cov)) + + +@pytest.mark.parametrize("mode", ["cd", "lars"]) +def test_graphical_lasso_n_iter(mode): + X, _ = datasets.make_classification(n_samples=5_000, n_features=20, random_state=0) + emp_cov = empirical_covariance(X) + + _, _, n_iter = graphical_lasso( + emp_cov, 0.2, mode=mode, max_iter=2, return_n_iter=True + ) + assert n_iter == 2 + + +def test_graphical_lasso_iris(): + # Hard-coded solution from R glasso package for alpha=1.0 + # (need to set penalize.diagonal to FALSE) + cov_R = np.array( + [ + [0.68112222, 0.0000000, 0.265820, 0.02464314], + [0.00000000, 0.1887129, 0.000000, 0.00000000], + [0.26582000, 0.0000000, 3.095503, 0.28697200], + [0.02464314, 0.0000000, 0.286972, 
0.57713289], + ] + ) + icov_R = np.array( + [ + [1.5190747, 0.000000, -0.1304475, 0.0000000], + [0.0000000, 5.299055, 0.0000000, 0.0000000], + [-0.1304475, 0.000000, 0.3498624, -0.1683946], + [0.0000000, 0.000000, -0.1683946, 1.8164353], + ] + ) + X = datasets.load_iris().data + emp_cov = empirical_covariance(X) + for method in ("cd", "lars"): + cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False, mode=method) + assert_array_almost_equal(cov, cov_R) + assert_array_almost_equal(icov, icov_R) + + +def test_graph_lasso_2D(): + # Hard-coded solution from Python skggm package + # obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)` + cov_skggm = np.array([[3.09550269, 1.186972], [1.186972, 0.57713289]]) + + icov_skggm = np.array([[1.52836773, -3.14334831], [-3.14334831, 8.19753385]]) + X = datasets.load_iris().data[:, 2:] + emp_cov = empirical_covariance(X) + for method in ("cd", "lars"): + cov, icov = graphical_lasso(emp_cov, alpha=0.1, return_costs=False, mode=method) + assert_array_almost_equal(cov, cov_skggm) + assert_array_almost_equal(icov, icov_skggm) + + +def test_graphical_lasso_iris_singular(): + # Small subset of rows to test the rank-deficient case + # Need to choose samples such that none of the variances are zero + indices = np.arange(10, 13) + + # Hard-coded solution from R glasso package for alpha=0.01 + cov_R = np.array( + [ + [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149], + [0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222], + [0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009], + [0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222], + ] + ) + icov_R = np.array( + [ + [24.42244057, -16.831679593, 0.0, 0.0], + [-16.83168201, 24.351841681, -6.206896552, -12.5], + [0.0, -6.206896171, 153.103448276, 0.0], + [0.0, -12.499999143, 0.0, 462.5], + ] + ) + X = datasets.load_iris().data[indices, :] + emp_cov = empirical_covariance(X) + for method in ("cd", "lars"): + cov, icov = graphical_lasso( + emp_cov, alpha=0.01, return_costs=False, mode=method + ) + assert_array_almost_equal(cov, cov_R, decimal=5) + assert_array_almost_equal(icov, icov_R, decimal=5) + + +def test_graphical_lasso_cv(random_state=1): + # Sample data from a sparse multivariate normal + dim = 5 + n_samples = 6 + random_state = check_random_state(random_state) + prec = make_sparse_spd_matrix(dim, alpha=0.96, random_state=random_state) + cov = linalg.inv(prec) + X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples) + # Capture stdout, to smoke test the verbose mode + orig_stdout = sys.stdout + try: + sys.stdout = StringIO() + # We need verbose very high so that Parallel prints on stdout + GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X) + finally: + sys.stdout = orig_stdout + + +@pytest.mark.parametrize("alphas_container_type", ["list", "tuple", "array"]) +def test_graphical_lasso_cv_alphas_iterable(alphas_container_type): + """Check that we can pass an array-like to `alphas`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/22489 + """ + true_cov = np.array( + [ + [0.8, 0.0, 0.2, 0.0], + [0.0, 0.4, 0.0, 0.0], + [0.2, 0.0, 0.3, 0.1], + [0.0, 0.0, 0.1, 0.7], + ] + ) + rng = np.random.RandomState(0) + X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200) + alphas = _convert_container([0.02, 0.03], alphas_container_type) + GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X) + + +@pytest.mark.parametrize( + "alphas,err_type,err_msg", + [ + ([-0.02, 0.03], ValueError, "must be > 0"), + ([0, 0.03], ValueError, "must be > 0"), + (["not_number", 0.03], TypeError, "must be an instance of float"), + ], +) +def test_graphical_lasso_cv_alphas_invalid_array(alphas, err_type, err_msg): + """Check that if an array-like containing a value + outside of (0, inf] is passed to `alphas`, a ValueError is raised. + Check if a string is passed, a TypeError is raised. + """ + true_cov = np.array( + [ + [0.8, 0.0, 0.2, 0.0], + [0.0, 0.4, 0.0, 0.0], + [0.2, 0.0, 0.3, 0.1], + [0.0, 0.0, 0.1, 0.7], + ] + ) + rng = np.random.RandomState(0) + X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200) + + with pytest.raises(err_type, match=err_msg): + GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X) + + +def test_graphical_lasso_cv_scores(): + splits = 4 + n_alphas = 5 + n_refinements = 3 + true_cov = np.array( + [ + [0.8, 0.0, 0.2, 0.0], + [0.0, 0.4, 0.0, 0.0], + [0.2, 0.0, 0.3, 0.1], + [0.0, 0.0, 0.1, 0.7], + ] + ) + rng = np.random.RandomState(0) + X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200) + cov = GraphicalLassoCV(cv=splits, alphas=n_alphas, n_refinements=n_refinements).fit( + X + ) + + _assert_graphical_lasso_cv_scores( + cov=cov, + n_splits=splits, + n_refinements=n_refinements, + n_alphas=n_alphas, + ) + + +@config_context(enable_metadata_routing=True) +def test_graphical_lasso_cv_scores_with_routing(global_random_seed): + """Check that `GraphicalLassoCV` internally dispatches metadata to + the splitter. 
+ """ + splits = 5 + n_alphas = 5 + n_refinements = 3 + true_cov = np.array( + [ + [0.8, 0.0, 0.2, 0.0], + [0.0, 0.4, 0.0, 0.0], + [0.2, 0.0, 0.3, 0.1], + [0.0, 0.0, 0.1, 0.7], + ] + ) + rng = np.random.RandomState(global_random_seed) + X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=300) + n_samples = X.shape[0] + groups = rng.randint(0, 5, n_samples) + params = {"groups": groups} + cv = GroupKFold(n_splits=splits) + cv.set_split_request(groups=True) + + cov = GraphicalLassoCV(cv=cv, alphas=n_alphas, n_refinements=n_refinements).fit( + X, **params + ) + + _assert_graphical_lasso_cv_scores( + cov=cov, + n_splits=splits, + n_refinements=n_refinements, + n_alphas=n_alphas, + ) + + +def _assert_graphical_lasso_cv_scores(cov, n_splits, n_refinements, n_alphas): + cv_results = cov.cv_results_ + # alpha and one for each split + + total_alphas = n_refinements * n_alphas + 1 + keys = ["alphas"] + split_keys = [f"split{i}_test_score" for i in range(n_splits)] + for key in keys + split_keys: + assert key in cv_results + assert len(cv_results[key]) == total_alphas + + cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys]) + expected_mean = cv_scores.mean(axis=0) + expected_std = cv_scores.std(axis=0) + + assert_allclose(cov.cv_results_["mean_test_score"], expected_mean) + assert_allclose(cov.cv_results_["std_test_score"], expected_std) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py new file mode 100644 index 0000000000000000000000000000000000000000..ebeb2c6e5aa6bf6f2316792b6cb5ba1c330d47bc --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/covariance/tests/test_robust_covariance.py @@ -0,0 +1,168 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import itertools + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.covariance import MinCovDet, empirical_covariance, fast_mcd +from sklearn.utils._testing import assert_array_almost_equal + +X = datasets.load_iris().data +X_1d = X[:, 0] +n_samples, n_features = X.shape + + +def test_mcd(global_random_seed): + # Tests the FastMCD algorithm implementation + # Small data set + # test without outliers (random independent normal data) + launch_mcd_on_dataset(100, 5, 0, 0.02, 0.1, 75, global_random_seed) + # test with a contaminated data set (medium contamination) + launch_mcd_on_dataset(100, 5, 20, 0.3, 0.3, 65, global_random_seed) + # test with a contaminated data set (strong contamination) + launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50, global_random_seed) + + # Medium data set + launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540, global_random_seed) + + # Large data set + launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870, global_random_seed) + + # 1D data set + launch_mcd_on_dataset(500, 1, 100, 0.02, 0.02, 350, global_random_seed) + + +def test_fast_mcd_on_invalid_input(): + X = np.arange(100) + msg = "Expected 2D array, got 1D array instead" + with pytest.raises(ValueError, match=msg): + fast_mcd(X) + + +def test_mcd_class_on_invalid_input(): + X = np.arange(100) + mcd = MinCovDet() + msg = "Expected 2D array, got 1D array instead" + with pytest.raises(ValueError, match=msg): + mcd.fit(X) + + +def launch_mcd_on_dataset( + n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support, seed +): + rand_gen = np.random.RandomState(seed) + data = rand_gen.randn(n_samples, n_features) + 
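    # (Each outlier row below is shifted by +/- 5 in every feature:
    #  10.0 * (randint(2) - 0.5) evaluates to either -5.0 or +5.0 per
    #  coordinate, which lies far outside the standard normal inliers.)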
# add some outliers + outliers_index = rand_gen.permutation(n_samples)[:n_outliers] + outliers_offset = 10.0 * (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5) + data[outliers_index] += outliers_offset + inliers_mask = np.ones(n_samples).astype(bool) + inliers_mask[outliers_index] = False + + pure_data = data[inliers_mask] + # compute MCD by fitting an object + mcd_fit = MinCovDet(random_state=seed).fit(data) + T = mcd_fit.location_ + S = mcd_fit.covariance_ + H = mcd_fit.support_ + # compare with the estimates learnt from the inliers + error_location = np.mean((pure_data.mean(0) - T) ** 2) + assert error_location < tol_loc + error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2) + assert error_cov < tol_cov + assert np.sum(H) >= tol_support + assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_) + + +def test_mcd_issue1127(): + # Check that the code does not break with X.shape = (3, 1) + # (i.e. n_support = n_samples) + rnd = np.random.RandomState(0) + X = rnd.normal(size=(3, 1)) + mcd = MinCovDet() + mcd.fit(X) + + +def test_mcd_issue3367(global_random_seed): + # Check that MCD completes when the covariance matrix is singular + # i.e. one of the rows and columns are all zeros + rand_gen = np.random.RandomState(global_random_seed) + + # Think of these as the values for X and Y -> 10 values between -5 and 5 + data_values = np.linspace(-5, 5, 10).tolist() + # Get the cartesian product of all possible coordinate pairs from above set + data = np.array(list(itertools.product(data_values, data_values))) + + # Add a third column that's all zeros to make our data a set of point + # within a plane, which means that the covariance matrix will be singular + data = np.hstack((data, np.zeros((data.shape[0], 1)))) + + # The below line of code should raise an exception if the covariance matrix + # is singular. As a further test, since we have points in XYZ, the + # principle components (Eigenvectors) of these directly relate to the + # geometry of the points. Since it's a plane, we should be able to test + # that the Eigenvector that corresponds to the smallest Eigenvalue is the + # plane normal, specifically [0, 0, 1], since everything is in the XY plane + # (as I've set it up above). To do this one would start by: + # + # evals, evecs = np.linalg.eigh(mcd_fit.covariance_) + # normal = evecs[:, np.argmin(evals)] + # + # After which we need to assert that our `normal` is equal to [0, 0, 1]. + # Do note that there is floating point error associated with this, so it's + # best to subtract the two and then compare some small tolerance (e.g. + # 1e-12). + MinCovDet(random_state=rand_gen).fit(data) + + +def test_mcd_support_covariance_is_zero(): + # Check that MCD returns a ValueError with informative message when the + # covariance of the support data is equal to 0. + X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1]) + X_1 = X_1.reshape(-1, 1) + X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3]) + X_2 = X_2.reshape(-1, 1) + msg = ( + "The covariance matrix of the support data is equal to 0, try to " + "increase support_fraction" + ) + for X in [X_1, X_2]: + with pytest.raises(ValueError, match=msg): + MinCovDet().fit(X) + + +def test_mcd_increasing_det_warning(global_random_seed): + # Check that a warning is raised if we observe increasing determinants + # during the c_step. In theory the sequence of determinants should be + # decreasing. 
Increasing determinants are likely due to ill-conditioned + # covariance matrices that result in poor precision matrices. + + X = [ + [5.1, 3.5, 1.4, 0.2], + [4.9, 3.0, 1.4, 0.2], + [4.7, 3.2, 1.3, 0.2], + [4.6, 3.1, 1.5, 0.2], + [5.0, 3.6, 1.4, 0.2], + [4.6, 3.4, 1.4, 0.3], + [5.0, 3.4, 1.5, 0.2], + [4.4, 2.9, 1.4, 0.2], + [4.9, 3.1, 1.5, 0.1], + [5.4, 3.7, 1.5, 0.2], + [4.8, 3.4, 1.6, 0.2], + [4.8, 3.0, 1.4, 0.1], + [4.3, 3.0, 1.1, 0.1], + [5.1, 3.5, 1.4, 0.3], + [5.7, 3.8, 1.7, 0.3], + [5.4, 3.4, 1.7, 0.2], + [4.6, 3.6, 1.0, 0.2], + [5.0, 3.0, 1.6, 0.2], + [5.2, 3.5, 1.5, 0.2], + ] + + mcd = MinCovDet(support_fraction=0.5, random_state=global_random_seed) + warn_msg = "Determinant has increased" + with pytest.warns(RuntimeWarning, match=warn_msg): + mcd.fit(X) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01930f833969297ae95bbd5e9bb5ebf7173c7a73 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_base.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73aef88de85f6bddb593d818d9f0d2c773403050 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_dict_learning.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..882f3059ebcc467aaa8b623d6cccb6866e85405c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_factor_analysis.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ad9bb0857398340b08df0067c3603557c5cd1b9 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_fastica.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..185aa3b4047982e89a4fd154df0325db205a81a3 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_kernel_pca.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f65247fdf3ba8be07340a36fa7c67af3033f3956 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_lda.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb49d0a33aa1f79f79cfba5a7dfb85ad4dba9afb Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_nmf.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44bf2fa10648b3b87882d99d020a34d85e78bb15 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_pca.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b863755531b39697ef92defbd0d8d70008bfa21 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_sparse_pca.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..157d7316e1321c128fff6eb42f0718b5add4cd99 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/__pycache__/_truncated_svd.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d78fee7ad3cbd2f59e66c340b0ddca3967ba8ab3 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56454db3b1133f9c96556bf2d72e1efc514f9ffd Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_dict_learning.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3835689c7af6f58f348fc80160ea5452dc8779f6 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_factor_analysis.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae9bf412e460e0ee67b069aed010081bf834b526 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_incremental_pca.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43ac021c420ecab0297a2da228255ed0ee4d91c5 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_nmf.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..589a26feb68458e705efa014c06e5937403d83bd Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_online_lda.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2744613f6c1b2e137084d8268c217b0227981514 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_sparse_pca.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5bc006542e6c3c65edc417e5b954c0493ee75f8 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/__pycache__/test_truncated_svd.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py new file mode 100644 index 0000000000000000000000000000000000000000..b222cf4e158ff7059c6e0c43fff678d907b82fea --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/decomposition/tests/test_kernel_pca.py @@ -0,0 +1,566 @@ +import warnings + +import numpy as np +import pytest + +import sklearn +from sklearn.datasets import load_iris, make_blobs, make_circles +from sklearn.decomposition import PCA, KernelPCA +from sklearn.exceptions import NotFittedError +from sklearn.linear_model import Perceptron +from sklearn.metrics.pairwise import rbf_kernel +from sklearn.model_selection import GridSearchCV +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) +from 
sklearn.utils.fixes import CSR_CONTAINERS +from sklearn.utils.validation import _check_psd_eigenvalues + + +def test_kernel_pca(): + """Nominal test for all solvers and all known kernels + a custom one + + It tests + - that fit_transform is equivalent to fit+transform + - that the shapes of transforms and inverse transforms are correct + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + def histogram(x, y, **kwargs): + # Histogram kernel implemented as a callable. + assert kwargs == {} # no kernel_params that we didn't ask for + return np.minimum(x, y).sum() + + for eigen_solver in ("auto", "dense", "arpack", "randomized"): + for kernel in ("linear", "rbf", "poly", histogram): + # histogram kernel produces singular matrix inside linalg.solve + # XXX use a least-squares approximation? + inv = not callable(kernel) + + # transform fit data + kpca = KernelPCA( + 4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=inv + ) + X_fit_transformed = kpca.fit_transform(X_fit) + X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) + assert_array_almost_equal( + np.abs(X_fit_transformed), np.abs(X_fit_transformed2) + ) + + # non-regression test: previously, gamma would be 0 by default, + # forcing all eigenvalues to 0 under the poly kernel + assert X_fit_transformed.size != 0 + + # transform new data + X_pred_transformed = kpca.transform(X_pred) + assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1] + + # inverse transform + if inv: + X_pred2 = kpca.inverse_transform(X_pred_transformed) + assert X_pred2.shape == X_pred.shape + + +def test_kernel_pca_invalid_parameters(): + """Check that kPCA raises an error if the parameters are invalid + + Tests fitting inverse transform with a precomputed kernel raises a + ValueError. + """ + estimator = KernelPCA( + n_components=10, fit_inverse_transform=True, kernel="precomputed" + ) + err_ms = "Cannot fit_inverse_transform with a precomputed kernel" + with pytest.raises(ValueError, match=err_ms): + estimator.fit(np.random.randn(10, 10)) + + +def test_kernel_pca_consistent_transform(): + """Check robustness to mutations in the original training array + + Test that after fitting a kPCA model, it stays independent of any + mutation of the values of the original data object by relying on an + internal copy. + """ + # X_fit_ needs to retain the old, unmodified copy of X + state = np.random.RandomState(0) + X = state.rand(10, 10) + kpca = KernelPCA(random_state=state).fit(X) + transformed1 = kpca.transform(X) + + X_copy = X.copy() + X[:, 0] = 666 + transformed2 = kpca.transform(X_copy) + assert_array_almost_equal(transformed1, transformed2) + + +def test_kernel_pca_deterministic_output(): + """Test that Kernel PCA produces deterministic output + + Tests that the same inputs and random state produce the same output. + """ + rng = np.random.RandomState(0) + X = rng.rand(10, 10) + eigen_solver = ("arpack", "dense") + + for solver in eigen_solver: + transformed_X = np.zeros((20, 2)) + for i in range(20): + kpca = KernelPCA(n_components=2, eigen_solver=solver, random_state=rng) + transformed_X[i, :] = kpca.fit_transform(X)[0] + assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_kernel_pca_sparse(csr_container): + """Test that kPCA works on a sparse data input. + + Same test as ``test_kernel_pca except inverse_transform`` since it's not + implemented for sparse matrices. 
+ """ + rng = np.random.RandomState(0) + X_fit = csr_container(rng.random_sample((5, 4))) + X_pred = csr_container(rng.random_sample((2, 4))) + + for eigen_solver in ("auto", "arpack", "randomized"): + for kernel in ("linear", "rbf", "poly"): + # transform fit data + kpca = KernelPCA( + 4, + kernel=kernel, + eigen_solver=eigen_solver, + fit_inverse_transform=False, + random_state=0, + ) + X_fit_transformed = kpca.fit_transform(X_fit) + X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit) + assert_array_almost_equal( + np.abs(X_fit_transformed), np.abs(X_fit_transformed2) + ) + + # transform new data + X_pred_transformed = kpca.transform(X_pred) + assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1] + + # inverse transform: not available for sparse matrices + # XXX: should we raise another exception type here? For instance: + # NotImplementedError. + with pytest.raises(NotFittedError): + kpca.inverse_transform(X_pred_transformed) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +@pytest.mark.parametrize("n_features", [4, 10]) +def test_kernel_pca_linear_kernel(solver, n_features): + """Test that kPCA with linear kernel is equivalent to PCA for all solvers. + + KernelPCA with linear kernel should produce the same output as PCA. + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, n_features)) + X_pred = rng.random_sample((2, n_features)) + + # for a linear kernel, kernel PCA should find the same projection as PCA + # modulo the sign (direction) + # fit only the first four components: fifth is near zero eigenvalue, so + # can be trimmed due to roundoff error + n_comps = 3 if solver == "arpack" else 4 + assert_array_almost_equal( + np.abs(KernelPCA(n_comps, eigen_solver=solver).fit(X_fit).transform(X_pred)), + np.abs( + PCA(n_comps, svd_solver=solver if solver != "dense" else "full") + .fit(X_fit) + .transform(X_pred) + ), + ) + + +def test_kernel_pca_n_components(): + """Test that `n_components` is correctly taken into account for projections + + For all solvers this tests that the output has the correct shape depending + on the selected number of components. + """ + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + for eigen_solver in ("dense", "arpack", "randomized"): + for c in [1, 2, 4]: + kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver) + shape = kpca.fit(X_fit).transform(X_pred).shape + + assert shape == (2, c) + + +def test_remove_zero_eig(): + """Check that the ``remove_zero_eig`` parameter works correctly. + + Tests that the null-space (Zero) eigenvalues are removed when + remove_zero_eig=True, whereas they are not by default. + """ + X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]]) + + # n_components=None (default) => remove_zero_eig is True + kpca = KernelPCA() + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 0) + + kpca = KernelPCA(n_components=2) + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 2) + + kpca = KernelPCA(n_components=2, remove_zero_eig=True) + Xt = kpca.fit_transform(X) + assert Xt.shape == (3, 0) + + +def test_leave_zero_eig(): + """Non-regression test for issue #12141 (PR #12143) + + This test checks that fit().transform() returns the same result as + fit_transform() in case of non-removed zero eigenvalue. 
+ """ + X_fit = np.array([[1, 1], [0, 0]]) + + # Assert that even with all np warnings on, there is no div by zero warning + with warnings.catch_warnings(): + # There might be warnings about the kernel being badly conditioned, + # but there should not be warnings about division by zero. + # (Numpy division by zero warning can have many message variants, but + # at least we know that it is a RuntimeWarning so lets check only this) + warnings.simplefilter("error", RuntimeWarning) + with np.errstate(all="warn"): + k = KernelPCA(n_components=2, remove_zero_eig=False, eigen_solver="dense") + # Fit, then transform + A = k.fit(X_fit).transform(X_fit) + # Do both at once + B = k.fit_transform(X_fit) + # Compare + assert_array_almost_equal(np.abs(A), np.abs(B)) + + +def test_kernel_pca_precomputed(): + """Test that kPCA works with a precomputed kernel, for all solvers""" + rng = np.random.RandomState(0) + X_fit = rng.random_sample((5, 4)) + X_pred = rng.random_sample((2, 4)) + + for eigen_solver in ("dense", "arpack", "randomized"): + X_kpca = ( + KernelPCA(4, eigen_solver=eigen_solver, random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + + X_kpca2 = ( + KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ) + .fit(np.dot(X_fit, X_fit.T)) + .transform(np.dot(X_pred, X_fit.T)) + ) + + X_kpca_train = KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ).fit_transform(np.dot(X_fit, X_fit.T)) + + X_kpca_train2 = ( + KernelPCA( + 4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0 + ) + .fit(np.dot(X_fit, X_fit.T)) + .transform(np.dot(X_fit, X_fit.T)) + ) + + assert_array_almost_equal(np.abs(X_kpca), np.abs(X_kpca2)) + + assert_array_almost_equal(np.abs(X_kpca_train), np.abs(X_kpca_train2)) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +def test_kernel_pca_precomputed_non_symmetric(solver): + """Check that the kernel centerer works. + + Tests that a non symmetric precomputed kernel is actually accepted + because the kernel centerer does its job correctly. + """ + + # a non symmetric gram matrix + K = [[1, 2], [3, 40]] + kpca = KernelPCA( + kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0 + ) + kpca.fit(K) # no error + + # same test with centered kernel + Kc = [[9, -9], [-9, 9]] + kpca_c = KernelPCA( + kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0 + ) + kpca_c.fit(Kc) + + # comparison between the non-centered and centered versions + assert_array_equal(kpca.eigenvectors_, kpca_c.eigenvectors_) + assert_array_equal(kpca.eigenvalues_, kpca_c.eigenvalues_) + + +def test_gridsearch_pipeline(): + """Check that kPCA works as expected in a grid search pipeline + + Test if we can do a grid-search to find parameters to separate + circles with a perceptron model. + """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + kpca = KernelPCA(kernel="rbf", n_components=2) + pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) + param_grid = dict(kernel_pca__gamma=2.0 ** np.arange(-2, 2)) + grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) + grid_search.fit(X, y) + assert grid_search.best_score_ == 1 + + +def test_gridsearch_pipeline_precomputed(): + """Check that kPCA works as expected in a grid search pipeline (2) + + Test if we can do a grid-search to find parameters to separate + circles with a perceptron model. This test uses a precomputed kernel. 
+ """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + kpca = KernelPCA(kernel="precomputed", n_components=2) + pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron(max_iter=5))]) + param_grid = dict(Perceptron__max_iter=np.arange(1, 5)) + grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid) + X_kernel = rbf_kernel(X, gamma=2.0) + grid_search.fit(X_kernel, y) + assert grid_search.best_score_ == 1 + + +def test_nested_circles(): + """Check that kPCA projects in a space where nested circles are separable + + Tests that 2D nested circles become separable with a perceptron when + projected in the first 2 kPCA using an RBF kernel, while raw samples + are not directly separable in the original space. + """ + X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0) + + # 2D nested circles are not linearly separable + train_score = Perceptron(max_iter=5).fit(X, y).score(X, y) + assert train_score < 0.8 + + # Project the circles data into the first 2 components of a RBF Kernel + # PCA model. + # Note that the gamma value is data dependent. If this test breaks + # and the gamma value has to be updated, the Kernel PCA example will + # have to be updated too. + kpca = KernelPCA( + kernel="rbf", n_components=2, fit_inverse_transform=True, gamma=2.0 + ) + X_kpca = kpca.fit_transform(X) + + # The data is perfectly linearly separable in that space + train_score = Perceptron(max_iter=5).fit(X_kpca, y).score(X_kpca, y) + assert train_score == 1.0 + + +def test_kernel_conditioning(): + """Check that ``_check_psd_eigenvalues`` is correctly called in kPCA + + Non-regression test for issue #12140 (PR #12145). + """ + + # create a pathological X leading to small non-zero eigenvalue + X = [[5, 1], [5 + 1e-8, 1e-8], [5 + 1e-8, 0]] + kpca = KernelPCA(kernel="linear", n_components=2, fit_inverse_transform=True) + kpca.fit(X) + + # check that the small non-zero eigenvalue was correctly set to zero + assert kpca.eigenvalues_.min() == 0 + assert np.all(kpca.eigenvalues_ == _check_psd_eigenvalues(kpca.eigenvalues_)) + + +@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"]) +def test_precomputed_kernel_not_psd(solver): + """Check how KernelPCA works with non-PSD kernels depending on n_components + + Tests for all methods what happens with a non PSD gram matrix (this + can happen in an isomap scenario, or with custom kernel functions, or + maybe with ill-posed datasets). + + When ``n_component`` is large enough to capture a negative eigenvalue, an + error should be raised. Otherwise, KernelPCA should run without error + since the negative eigenvalues are not selected. + """ + + # a non PSD kernel with large eigenvalues, already centered + # it was captured from an isomap call and multiplied by 100 for compacity + K = [ + [4.48, -1.0, 8.07, 2.33, 2.33, 2.33, -5.76, -12.78], + [-1.0, -6.48, 4.5, -1.24, -1.24, -1.24, -0.81, 7.49], + [8.07, 4.5, 15.48, 2.09, 2.09, 2.09, -11.1, -23.23], + [2.33, -1.24, 2.09, 4.0, -3.65, -3.65, 1.02, -0.9], + [2.33, -1.24, 2.09, -3.65, 4.0, -3.65, 1.02, -0.9], + [2.33, -1.24, 2.09, -3.65, -3.65, 4.0, 1.02, -0.9], + [-5.76, -0.81, -11.1, 1.02, 1.02, 1.02, 4.86, 9.75], + [-12.78, 7.49, -23.23, -0.9, -0.9, -0.9, 9.75, 21.46], + ] + # this gram matrix has 5 positive eigenvalues and 3 negative ones + # [ 52.72, 7.65, 7.65, 5.02, 0. , -0. , -6.13, -15.11] + + # 1. 
ask for enough components to get a significant negative one + kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=7) + # make sure that the appropriate error is raised + with pytest.raises(ValueError, match="There are significant negative eigenvalues"): + kpca.fit(K) + + # 2. ask for a small enough n_components to get only positive ones + kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=2) + if solver == "randomized": + # the randomized method is still inconsistent with the others on this + # since it selects the eigenvalues based on the largest 2 modules, not + # on the largest 2 values. + # + # At least we can ensure that we return an error instead of returning + # the wrong eigenvalues + with pytest.raises( + ValueError, match="There are significant negative eigenvalues" + ): + kpca.fit(K) + else: + # general case: make sure that it works + kpca.fit(K) + + +@pytest.mark.parametrize("n_components", [4, 10, 20]) +def test_kernel_pca_solvers_equivalence(n_components): + """Check that 'dense' 'arpack' & 'randomized' solvers give similar results""" + + # Generate random data + n_train, n_test = 1_000, 100 + X, _ = make_circles( + n_samples=(n_train + n_test), factor=0.3, noise=0.05, random_state=0 + ) + X_fit, X_pred = X[:n_train, :], X[n_train:, :] + + # reference (full) + ref_pred = ( + KernelPCA(n_components, eigen_solver="dense", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + + # arpack + a_pred = ( + KernelPCA(n_components, eigen_solver="arpack", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + # check that the result is still correct despite the approx + assert_array_almost_equal(np.abs(a_pred), np.abs(ref_pred)) + + # randomized + r_pred = ( + KernelPCA(n_components, eigen_solver="randomized", random_state=0) + .fit(X_fit) + .transform(X_pred) + ) + # check that the result is still correct despite the approximation + assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred)) + + +def test_kernel_pca_inverse_transform_reconstruction(): + """Test if the reconstruction is a good approximation. + + Note that in general it is not possible to get an arbitrarily good + reconstruction because of kernel centering that does not + preserve all the information of the original data. 
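+
+    The check below therefore only asserts a coarse relative reconstruction
+    error, ``norm(X - X_reconst) / norm(X) < 0.1``, rather than an exact
+    recovery of ``X``.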
+ """ + X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0) + + kpca = KernelPCA( + n_components=20, kernel="rbf", fit_inverse_transform=True, alpha=1e-3 + ) + X_trans = kpca.fit_transform(X) + X_reconst = kpca.inverse_transform(X_trans) + assert np.linalg.norm(X - X_reconst) / np.linalg.norm(X) < 1e-1 + + +def test_kernel_pca_raise_not_fitted_error(): + X = np.random.randn(15).reshape(5, 3) + kpca = KernelPCA() + kpca.fit(X) + with pytest.raises(NotFittedError): + kpca.inverse_transform(X) + + +def test_32_64_decomposition_shape(): + """Test that the decomposition is similar for 32 and 64 bits data + + Non regression test for + https://github.com/scikit-learn/scikit-learn/issues/18146 + """ + X, y = make_blobs( + n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1 + ) + X = StandardScaler().fit_transform(X) + X -= X.min() + + # Compare the shapes (corresponds to the number of non-zero eigenvalues) + kpca = KernelPCA() + assert kpca.fit_transform(X).shape == kpca.fit_transform(X.astype(np.float32)).shape + + +def test_kernel_pca_feature_names_out(): + """Check feature names out for KernelPCA.""" + X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0) + kpca = KernelPCA(n_components=2).fit(X) + + names = kpca.get_feature_names_out() + assert_array_equal([f"kernelpca{i}" for i in range(2)], names) + + +def test_kernel_pca_inverse_correct_gamma(): + """Check that gamma is set correctly when not provided. + + Non-regression test for #26280 + """ + rng = np.random.RandomState(0) + X = rng.random_sample((5, 4)) + + kwargs = { + "n_components": 2, + "random_state": rng, + "fit_inverse_transform": True, + "kernel": "rbf", + } + + expected_gamma = 1 / X.shape[1] + kpca1 = KernelPCA(gamma=None, **kwargs).fit(X) + kpca2 = KernelPCA(gamma=expected_gamma, **kwargs).fit(X) + + assert kpca1.gamma_ == expected_gamma + assert kpca2.gamma_ == expected_gamma + + X1_recon = kpca1.inverse_transform(kpca1.transform(X)) + X2_recon = kpca2.inverse_transform(kpca1.transform(X)) + + assert_allclose(X1_recon, X2_recon) + + +def test_kernel_pca_pandas_output(): + """Check that KernelPCA works with pandas output when the solver is arpack. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/27579 + """ + pytest.importorskip("pandas") + X, _ = load_iris(as_frame=True, return_X_y=True) + with sklearn.config_context(transform_output="pandas"): + KernelPCA(n_components=2, eigen_solver="arpack").fit_transform(X) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8dcbe3140415aaa00c0d807e6c9170f7f6be3b8a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/__init__.py @@ -0,0 +1,10 @@ +"""Gaussian process based regression and classification.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from . 
import kernels +from ._gpc import GaussianProcessClassifier +from ._gpr import GaussianProcessRegressor + +__all__ = ["GaussianProcessRegressor", "GaussianProcessClassifier", "kernels"] diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py new file mode 100644 index 0000000000000000000000000000000000000000..ff094e4d5f4fd699fd9f84bbce778e7cc4cbec9a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/_gpc.py @@ -0,0 +1,904 @@ +"""Gaussian processes classification.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from numbers import Integral +from operator import itemgetter + +import numpy as np +import scipy.optimize +from scipy.linalg import cho_solve, cholesky, solve +from scipy.special import erf, expit + +from ..base import BaseEstimator, ClassifierMixin, _fit_context, clone +from ..multiclass import OneVsOneClassifier, OneVsRestClassifier +from ..preprocessing import LabelEncoder +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.optimize import _check_optimize_result +from ..utils.validation import check_is_fitted, validate_data +from .kernels import RBF, CompoundKernel, Kernel +from .kernels import ConstantKernel as C + +# Values required for approximating the logistic sigmoid by +# error functions. coefs are obtained via: +# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf]) +# b = logistic(x) +# A = (erf(np.dot(x, self.lambdas)) + 1) / 2 +# coefs = lstsq(A, b)[0] +LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis] +COEFS = np.array( + [-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654] +)[:, np.newaxis] + + +class _BinaryGaussianProcessClassifierLaplace(BaseEstimator): + """Binary Gaussian process classification based on Laplace approximation. + + The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_. + + Internally, the Laplace approximation is used for approximating the + non-Gaussian posterior by a Gaussian. + + Currently, the implementation is restricted to using the logistic link + function. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernel : kernel instance, default=None + The kernel specifying the covariance function of the GP. If None is + passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that + the kernel's hyperparameters are optimized during fitting. + + optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b' + Can either be one of the internally supported optimizers for optimizing + the kernel's parameters, specified by a string, or an externally + defined optimizer passed as a callable. If a callable is passed, it + must have the signature:: + + def optimizer(obj_func, initial_theta, bounds): + # * 'obj_func' is the objective function to be maximized, which + # takes the hyperparameters theta as parameter and an + # optional flag eval_gradient, which determines if the + # gradient is returned additionally to the function value + # * 'initial_theta': the initial value for theta, which can be + # used by local optimizers + # * 'bounds': the bounds on the values of theta + .... + # Returned are the best found hyperparameters theta and + # the corresponding value of the target function. + return theta_opt, func_min + + Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize + is used. 
If None is passed, the kernel's parameters are kept fixed. + Available internal optimizers are:: + + 'fmin_l_bfgs_b' + + n_restarts_optimizer : int, default=0 + The number of restarts of the optimizer for finding the kernel's + parameters which maximize the log-marginal likelihood. The first run + of the optimizer is performed from the kernel's initial parameters, + the remaining ones (if any) from thetas sampled log-uniform randomly + from the space of allowed theta-values. If greater than 0, all bounds + must be finite. Note that n_restarts_optimizer=0 implies that one + run is performed. + + max_iter_predict : int, default=100 + The maximum number of iterations in Newton's method for approximating + the posterior during predict. Smaller values will reduce computation + time at the cost of worse results. + + warm_start : bool, default=False + If warm-starts are enabled, the solution of the last Newton iteration + on the Laplace approximation of the posterior mode is used as + initialization for the next call of _posterior_mode(). This can speed + up convergence when _posterior_mode is called several times on similar + problems as in hyperparameter optimization. See :term:`the Glossary + `. + + copy_X_train : bool, default=True + If True, a persistent copy of the training data is stored in the + object. Otherwise, just a reference to the training data is stored, + which might cause predictions to change if the data is modified + externally. + + random_state : int, RandomState instance or None, default=None + Determines random number generation used to initialize the centers. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + X_train_ : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data (also + required for prediction). + + y_train_ : array-like of shape (n_samples,) + Target values in training data (also required for prediction) + + classes_ : array-like of shape (n_classes,) + Unique class labels. + + kernel_ : kernl instance + The kernel used for prediction. The structure of the kernel is the + same as the one passed as parameter but with optimized hyperparameters + + L_ : array-like of shape (n_samples, n_samples) + Lower-triangular Cholesky decomposition of the kernel in X_train_ + + pi_ : array-like of shape (n_samples,) + The probabilities of the positive class for the training points + X_train_ + + W_sr_ : array-like of shape (n_samples,) + Square root of W, the Hessian of log-likelihood of the latent function + values for the observed labels. Since W is diagonal, only the diagonal + of sqrt(W) is stored. + + log_marginal_likelihood_value_ : float + The log-marginal-likelihood of ``self.kernel_.theta`` + + References + ---------- + .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams, + "Gaussian Processes for Machine Learning", + MIT Press 2006 `_ + """ + + def __init__( + self, + kernel=None, + *, + optimizer="fmin_l_bfgs_b", + n_restarts_optimizer=0, + max_iter_predict=100, + warm_start=False, + copy_X_train=True, + random_state=None, + ): + self.kernel = kernel + self.optimizer = optimizer + self.n_restarts_optimizer = n_restarts_optimizer + self.max_iter_predict = max_iter_predict + self.warm_start = warm_start + self.copy_X_train = copy_X_train + self.random_state = random_state + + def fit(self, X, y): + """Fit Gaussian process classification model. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data. + + y : array-like of shape (n_samples,) + Target values, must be binary. + + Returns + ------- + self : returns an instance of self. + """ + if self.kernel is None: # Use an RBF kernel as default + self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF( + 1.0, length_scale_bounds="fixed" + ) + else: + self.kernel_ = clone(self.kernel) + + self.rng = check_random_state(self.random_state) + + self.X_train_ = np.copy(X) if self.copy_X_train else X + + # Encode class labels and check that it is a binary classification + # problem + label_encoder = LabelEncoder() + self.y_train_ = label_encoder.fit_transform(y) + self.classes_ = label_encoder.classes_ + if self.classes_.size > 2: + raise ValueError( + "%s supports only binary classification. y contains classes %s" + % (self.__class__.__name__, self.classes_) + ) + elif self.classes_.size == 1: + raise ValueError( + "{0:s} requires 2 classes; got {1:d} class".format( + self.__class__.__name__, self.classes_.size + ) + ) + + if self.optimizer is not None and self.kernel_.n_dims > 0: + # Choose hyperparameters based on maximizing the log-marginal + # likelihood (potentially starting from several initial values) + def obj_func(theta, eval_gradient=True): + if eval_gradient: + lml, grad = self.log_marginal_likelihood( + theta, eval_gradient=True, clone_kernel=False + ) + return -lml, -grad + else: + return -self.log_marginal_likelihood(theta, clone_kernel=False) + + # First optimize starting from theta specified in kernel + optima = [ + self._constrained_optimization( + obj_func, self.kernel_.theta, self.kernel_.bounds + ) + ] + + # Additional runs are performed from log-uniform chosen initial + # theta + if self.n_restarts_optimizer > 0: + if not np.isfinite(self.kernel_.bounds).all(): + raise ValueError( + "Multiple optimizer restarts (n_restarts_optimizer>0) " + "requires that all bounds are finite." + ) + bounds = self.kernel_.bounds + for iteration in range(self.n_restarts_optimizer): + theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1])) + optima.append( + self._constrained_optimization(obj_func, theta_initial, bounds) + ) + # Select result from run with minimal (negative) log-marginal + # likelihood + lml_values = list(map(itemgetter(1), optima)) + self.kernel_.theta = optima[np.argmin(lml_values)][0] + self.kernel_._check_bounds_params() + + self.log_marginal_likelihood_value_ = -np.min(lml_values) + else: + self.log_marginal_likelihood_value_ = self.log_marginal_likelihood( + self.kernel_.theta + ) + + # Precompute quantities required for predictions which are independent + # of actual query points + K = self.kernel_(self.X_train_) + + _, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode( + K, return_temporaries=True + ) + + return self + + def predict(self, X): + """Perform classification on an array of test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated for classification. 
+ + Returns + ------- + C : ndarray of shape (n_samples,) + Predicted target values for X, values are from ``classes_`` + """ + check_is_fitted(self) + + # As discussed on Section 3.4.2 of GPML, for making hard binary + # decisions, it is enough to compute the MAP of the posterior and + # pass it through the link function + K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) + f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4 + + return np.where(f_star > 0, self.classes_[1], self.classes_[0]) + + def predict_proba(self, X): + """Return probability estimates for the test vector X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated for classification. + + Returns + ------- + C : array-like of shape (n_samples, n_classes) + Returns the probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute ``classes_``. + """ + check_is_fitted(self) + + # Based on Algorithm 3.2 of GPML + K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) + f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4 + v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5 + # Line 6 (compute np.diag(v.T.dot(v)) via einsum) + var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v) + + # Line 7: + # Approximate \int log(z) * N(z | f_star, var_f_star) + # Approximation is due to Williams & Barber, "Bayesian Classification + # with Gaussian Processes", Appendix A: Approximate the logistic + # sigmoid by a linear combination of 5 error functions. + # For information on how this integral can be computed see + # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html + alpha = 1 / (2 * var_f_star) + gamma = LAMBDAS * f_star + integrals = ( + np.sqrt(np.pi / alpha) + * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) + / (2 * np.sqrt(var_f_star * 2 * np.pi)) + ) + pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum() + + return np.vstack((1 - pi_star, pi_star)).T + + def log_marginal_likelihood( + self, theta=None, eval_gradient=False, clone_kernel=True + ): + """Returns log-marginal likelihood of theta for training data. + + Parameters + ---------- + theta : array-like of shape (n_kernel_params,), default=None + Kernel hyperparameters for which the log-marginal likelihood is + evaluated. If None, the precomputed log_marginal_likelihood + of ``self.kernel_.theta`` is returned. + + eval_gradient : bool, default=False + If True, the gradient of the log-marginal likelihood with respect + to the kernel hyperparameters at position theta is returned + additionally. If True, theta must not be None. + + clone_kernel : bool, default=True + If True, the kernel attribute is copied. If False, the kernel + attribute is modified, but may result in a performance improvement. + + Returns + ------- + log_likelihood : float + Log-marginal likelihood of theta for training data. + + log_likelihood_gradient : ndarray of shape (n_kernel_params,), \ + optional + Gradient of the log-marginal likelihood with respect to the kernel + hyperparameters at position theta. + Only returned when `eval_gradient` is True. 
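+
+        Notes
+        -----
+        During :meth:`fit`, this method is wrapped into the negated objective
+        handed to the optimizer, along the lines of::
+
+            def obj_func(theta, eval_gradient=True):
+                lml, grad = self.log_marginal_likelihood(
+                    theta, eval_gradient=True, clone_kernel=False
+                )
+                return -lml, -grad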
+ """ + if theta is None: + if eval_gradient: + raise ValueError("Gradient can only be evaluated for theta!=None") + return self.log_marginal_likelihood_value_ + + if clone_kernel: + kernel = self.kernel_.clone_with_theta(theta) + else: + kernel = self.kernel_ + kernel.theta = theta + + if eval_gradient: + K, K_gradient = kernel(self.X_train_, eval_gradient=True) + else: + K = kernel(self.X_train_) + + # Compute log-marginal-likelihood Z and also store some temporaries + # which can be reused for computing Z's gradient + Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True) + + if not eval_gradient: + return Z + + # Compute gradient based on Algorithm 5.1 of GPML + d_Z = np.empty(theta.shape[0]) + # XXX: Get rid of the np.diag() in the next line + R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7 + C = solve(L, W_sr[:, np.newaxis] * K) # Line 8 + # Line 9: (use einsum to compute np.diag(C.T.dot(C)))) + s_2 = ( + -0.5 + * (np.diag(K) - np.einsum("ij, ij -> j", C, C)) + * (pi * (1 - pi) * (1 - 2 * pi)) + ) # third derivative + + for j in range(d_Z.shape[0]): + C = K_gradient[:, :, j] # Line 11 + # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C))) + s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel()) + + b = C.dot(self.y_train_ - pi) # Line 13 + s_3 = b - K.dot(R.dot(b)) # Line 14 + + d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15 + + return Z, d_Z + + def _posterior_mode(self, K, return_temporaries=False): + """Mode-finding for binary Laplace GPC and fixed kernel. + + This approximates the posterior of the latent function values for given + inputs and target observations with a Gaussian approximation and uses + Newton's iteration to find the mode of this approximation. + """ + # Based on Algorithm 3.1 of GPML + + # If warm_start are enabled, we reuse the last solution for the + # posterior mode as initialization; otherwise, we initialize with 0 + if ( + self.warm_start + and hasattr(self, "f_cached") + and self.f_cached.shape == self.y_train_.shape + ): + f = self.f_cached + else: + f = np.zeros_like(self.y_train_, dtype=np.float64) + + # Use Newton's iteration method to find mode of Laplace approximation + log_marginal_likelihood = -np.inf + for _ in range(self.max_iter_predict): + # Line 4 + pi = expit(f) + W = pi * (1 - pi) + # Line 5 + W_sr = np.sqrt(W) + W_sr_K = W_sr[:, np.newaxis] * K + B = np.eye(W.shape[0]) + W_sr_K * W_sr + L = cholesky(B, lower=True) + # Line 6 + b = W * f + (self.y_train_ - pi) + # Line 7 + a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b)) + # Line 8 + f = K.dot(a) + + # Line 10: Compute log marginal likelihood in loop and use as + # convergence criterion + lml = ( + -0.5 * a.T.dot(f) + - np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum() + - np.log(np.diag(L)).sum() + ) + # Check if we have converged (log marginal likelihood does + # not decrease) + # XXX: more complex convergence criterion + if lml - log_marginal_likelihood < 1e-10: + break + log_marginal_likelihood = lml + + self.f_cached = f # Remember solution for later warm-starts + if return_temporaries: + return log_marginal_likelihood, (pi, W_sr, L, b, a) + else: + return log_marginal_likelihood + + def _constrained_optimization(self, obj_func, initial_theta, bounds): + if self.optimizer == "fmin_l_bfgs_b": + opt_res = scipy.optimize.minimize( + obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds + ) + _check_optimize_result("lbfgs", opt_res) + theta_opt, func_min = opt_res.x, opt_res.fun + elif callable(self.optimizer): + 
theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds) + else: + raise ValueError("Unknown optimizer %s." % self.optimizer) + + return theta_opt, func_min + + +class GaussianProcessClassifier(ClassifierMixin, BaseEstimator): + """Gaussian process classification (GPC) based on Laplace approximation. + + The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_. + + Internally, the Laplace approximation is used for approximating the + non-Gaussian posterior by a Gaussian. + + Currently, the implementation is restricted to using the logistic link + function. For multi-class classification, several binary one-versus rest + classifiers are fitted. Note that this class thus does not implement + a true multi-class Laplace approximation. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernel : kernel instance, default=None + The kernel specifying the covariance function of the GP. If None is + passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that + the kernel's hyperparameters are optimized during fitting. Also kernel + cannot be a `CompoundKernel`. + + optimizer : 'fmin_l_bfgs_b', callable or None, default='fmin_l_bfgs_b' + Can either be one of the internally supported optimizers for optimizing + the kernel's parameters, specified by a string, or an externally + defined optimizer passed as a callable. If a callable is passed, it + must have the signature:: + + def optimizer(obj_func, initial_theta, bounds): + # * 'obj_func' is the objective function to be maximized, which + # takes the hyperparameters theta as parameter and an + # optional flag eval_gradient, which determines if the + # gradient is returned additionally to the function value + # * 'initial_theta': the initial value for theta, which can be + # used by local optimizers + # * 'bounds': the bounds on the values of theta + .... + # Returned are the best found hyperparameters theta and + # the corresponding value of the target function. + return theta_opt, func_min + + Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize + is used. If None is passed, the kernel's parameters are kept fixed. + Available internal optimizers are:: + + 'fmin_l_bfgs_b' + + n_restarts_optimizer : int, default=0 + The number of restarts of the optimizer for finding the kernel's + parameters which maximize the log-marginal likelihood. The first run + of the optimizer is performed from the kernel's initial parameters, + the remaining ones (if any) from thetas sampled log-uniform randomly + from the space of allowed theta-values. If greater than 0, all bounds + must be finite. Note that n_restarts_optimizer=0 implies that one + run is performed. + + max_iter_predict : int, default=100 + The maximum number of iterations in Newton's method for approximating + the posterior during predict. Smaller values will reduce computation + time at the cost of worse results. + + warm_start : bool, default=False + If warm-starts are enabled, the solution of the last Newton iteration + on the Laplace approximation of the posterior mode is used as + initialization for the next call of _posterior_mode(). This can speed + up convergence when _posterior_mode is called several times on similar + problems as in hyperparameter optimization. See :term:`the Glossary + `. + + copy_X_train : bool, default=True + If True, a persistent copy of the training data is stored in the + object. 
Otherwise, just a reference to the training data is stored, + which might cause predictions to change if the data is modified + externally. + + random_state : int, RandomState instance or None, default=None + Determines random number generation used to initialize the centers. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + multi_class : {'one_vs_rest', 'one_vs_one'}, default='one_vs_rest' + Specifies how multi-class classification problems are handled. + Supported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest', + one binary Gaussian process classifier is fitted for each class, which + is trained to separate this class from the rest. In 'one_vs_one', one + binary Gaussian process classifier is fitted for each pair of classes, + which is trained to separate these two classes. The predictions of + these binary predictors are combined into multi-class predictions. + Note that 'one_vs_one' does not support predicting probability + estimates. + + n_jobs : int, default=None + The number of jobs to use for the computation: the specified + multiclass problems are computed in parallel. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + Attributes + ---------- + base_estimator_ : ``Estimator`` instance + The estimator instance that defines the likelihood function + using the observed data. + + kernel_ : kernel instance + The kernel used for prediction. In case of binary classification, + the structure of the kernel is the same as the one passed as parameter + but with optimized hyperparameters. In case of multi-class + classification, a CompoundKernel is returned which consists of the + different kernels used in the one-versus-rest classifiers. + + log_marginal_likelihood_value_ : float + The log-marginal-likelihood of ``self.kernel_.theta`` + + classes_ : array-like of shape (n_classes,) + Unique class labels. + + n_classes_ : int + The number of classes in the training data + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GaussianProcessRegressor : Gaussian process regression (GPR). + + References + ---------- + .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams, + "Gaussian Processes for Machine Learning", + MIT Press 2006 `_ + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import RBF + >>> X, y = load_iris(return_X_y=True) + >>> kernel = 1.0 * RBF(1.0) + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9866... + >>> gpc.predict_proba(X[:2,:]) + array([[0.83548752, 0.03228706, 0.13222543], + [0.79064206, 0.06525643, 0.14410151]]) + + For a comaprison of the GaussianProcessClassifier with other classifiers see: + :ref:`sphx_glr_auto_examples_classification_plot_classification_probability.py`. 
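+
+    A one-versus-one variant can be requested in the same way; a minimal
+    sketch reusing the iris data above (note that this mode does not support
+    ``predict_proba``)::
+
+        gpc_ovo = GaussianProcessClassifier(
+            kernel=kernel, multi_class="one_vs_one", random_state=0
+        ).fit(X, y)
+        y_pred = gpc_ovo.predict(X[:2, :])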
+ """ + + _parameter_constraints: dict = { + "kernel": [Kernel, None], + "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None], + "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")], + "max_iter_predict": [Interval(Integral, 1, None, closed="left")], + "warm_start": ["boolean"], + "copy_X_train": ["boolean"], + "random_state": ["random_state"], + "multi_class": [StrOptions({"one_vs_rest", "one_vs_one"})], + "n_jobs": [Integral, None], + } + + def __init__( + self, + kernel=None, + *, + optimizer="fmin_l_bfgs_b", + n_restarts_optimizer=0, + max_iter_predict=100, + warm_start=False, + copy_X_train=True, + random_state=None, + multi_class="one_vs_rest", + n_jobs=None, + ): + self.kernel = kernel + self.optimizer = optimizer + self.n_restarts_optimizer = n_restarts_optimizer + self.max_iter_predict = max_iter_predict + self.warm_start = warm_start + self.copy_X_train = copy_X_train + self.random_state = random_state + self.multi_class = multi_class + self.n_jobs = n_jobs + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit Gaussian process classification model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data. + + y : array-like of shape (n_samples,) + Target values, must be binary. + + Returns + ------- + self : object + Returns an instance of self. + """ + if isinstance(self.kernel, CompoundKernel): + raise ValueError("kernel cannot be a CompoundKernel") + + if self.kernel is None or self.kernel.requires_vector_input: + X, y = validate_data( + self, X, y, multi_output=False, ensure_2d=True, dtype="numeric" + ) + else: + X, y = validate_data( + self, X, y, multi_output=False, ensure_2d=False, dtype=None + ) + + self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace( + kernel=self.kernel, + optimizer=self.optimizer, + n_restarts_optimizer=self.n_restarts_optimizer, + max_iter_predict=self.max_iter_predict, + warm_start=self.warm_start, + copy_X_train=self.copy_X_train, + random_state=self.random_state, + ) + + self.classes_ = np.unique(y) + self.n_classes_ = self.classes_.size + if self.n_classes_ == 1: + raise ValueError( + "GaussianProcessClassifier requires 2 or more " + "distinct classes; got %d class (only class %s " + "is present)" % (self.n_classes_, self.classes_[0]) + ) + if self.n_classes_ > 2: + if self.multi_class == "one_vs_rest": + self.base_estimator_ = OneVsRestClassifier( + self.base_estimator_, n_jobs=self.n_jobs + ) + elif self.multi_class == "one_vs_one": + self.base_estimator_ = OneVsOneClassifier( + self.base_estimator_, n_jobs=self.n_jobs + ) + else: + raise ValueError("Unknown multi-class mode %s" % self.multi_class) + + self.base_estimator_.fit(X, y) + + if self.n_classes_ > 2: + self.log_marginal_likelihood_value_ = np.mean( + [ + estimator.log_marginal_likelihood() + for estimator in self.base_estimator_.estimators_ + ] + ) + else: + self.log_marginal_likelihood_value_ = ( + self.base_estimator_.log_marginal_likelihood() + ) + + return self + + def predict(self, X): + """Perform classification on an array of test vectors X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated for classification. + + Returns + ------- + C : ndarray of shape (n_samples,) + Predicted target values for X, values are from ``classes_``. 
+ """ + check_is_fitted(self) + + if self.kernel is None or self.kernel.requires_vector_input: + X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False) + else: + X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False) + + return self.base_estimator_.predict(X) + + def predict_proba(self, X): + """Return probability estimates for the test vector X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated for classification. + + Returns + ------- + C : array-like of shape (n_samples, n_classes) + Returns the probability of the samples for each class in + the model. The columns correspond to the classes in sorted + order, as they appear in the attribute :term:`classes_`. + """ + check_is_fitted(self) + if self.n_classes_ > 2 and self.multi_class == "one_vs_one": + raise ValueError( + "one_vs_one multi-class mode does not support " + "predicting probability estimates. Use " + "one_vs_rest mode instead." + ) + + if self.kernel is None or self.kernel.requires_vector_input: + X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False) + else: + X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False) + + return self.base_estimator_.predict_proba(X) + + @property + def kernel_(self): + """Return the kernel of the base estimator.""" + if self.n_classes_ == 2: + return self.base_estimator_.kernel_ + else: + return CompoundKernel( + [estimator.kernel_ for estimator in self.base_estimator_.estimators_] + ) + + def log_marginal_likelihood( + self, theta=None, eval_gradient=False, clone_kernel=True + ): + """Return log-marginal likelihood of theta for training data. + + In the case of multi-class classification, the mean log-marginal + likelihood of the one-versus-rest classifiers are returned. + + Parameters + ---------- + theta : array-like of shape (n_kernel_params,), default=None + Kernel hyperparameters for which the log-marginal likelihood is + evaluated. In the case of multi-class classification, theta may + be the hyperparameters of the compound kernel or of an individual + kernel. In the latter case, all individual kernel get assigned the + same theta values. If None, the precomputed log_marginal_likelihood + of ``self.kernel_.theta`` is returned. + + eval_gradient : bool, default=False + If True, the gradient of the log-marginal likelihood with respect + to the kernel hyperparameters at position theta is returned + additionally. Note that gradient computation is not supported + for non-binary classification. If True, theta must not be None. + + clone_kernel : bool, default=True + If True, the kernel attribute is copied. If False, the kernel + attribute is modified, but may result in a performance improvement. + + Returns + ------- + log_likelihood : float + Log-marginal likelihood of theta for training data. + + log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional + Gradient of the log-marginal likelihood with respect to the kernel + hyperparameters at position theta. + Only returned when `eval_gradient` is True. 
+ """ + check_is_fitted(self) + + if theta is None: + if eval_gradient: + raise ValueError("Gradient can only be evaluated for theta!=None") + return self.log_marginal_likelihood_value_ + + theta = np.asarray(theta) + if self.n_classes_ == 2: + return self.base_estimator_.log_marginal_likelihood( + theta, eval_gradient, clone_kernel=clone_kernel + ) + else: + if eval_gradient: + raise NotImplementedError( + "Gradient of log-marginal-likelihood not implemented for " + "multi-class GPC." + ) + estimators = self.base_estimator_.estimators_ + n_dims = estimators[0].kernel_.n_dims + if theta.shape[0] == n_dims: # use same theta for all sub-kernels + return np.mean( + [ + estimator.log_marginal_likelihood( + theta, clone_kernel=clone_kernel + ) + for i, estimator in enumerate(estimators) + ] + ) + elif theta.shape[0] == n_dims * self.classes_.shape[0]: + # theta for compound kernel + return np.mean( + [ + estimator.log_marginal_likelihood( + theta[n_dims * i : n_dims * (i + 1)], + clone_kernel=clone_kernel, + ) + for i, estimator in enumerate(estimators) + ] + ) + else: + raise ValueError( + "Shape of theta must be either %d or %d. " + "Obtained theta with shape %d." + % (n_dims, n_dims * self.classes_.shape[0], theta.shape[0]) + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py new file mode 100644 index 0000000000000000000000000000000000000000..d30854eae8e8690b2c2ef51ec4a6378b3ddb5766 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/_gpr.py @@ -0,0 +1,672 @@ +"""Gaussian processes regression.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from numbers import Integral, Real +from operator import itemgetter + +import numpy as np +import scipy.optimize +from scipy.linalg import cho_solve, cholesky, solve_triangular + +from ..base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context, clone +from ..preprocessing._data import _handle_zeros_in_scale +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.optimize import _check_optimize_result +from ..utils.validation import validate_data +from .kernels import RBF, Kernel +from .kernels import ConstantKernel as C + +GPR_CHOLESKY_LOWER = True + + +class GaussianProcessRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator): + """Gaussian process regression (GPR). + + The implementation is based on Algorithm 2.1 of [RW2006]_. + + In addition to standard scikit-learn estimator API, + :class:`GaussianProcessRegressor`: + + * allows prediction without prior fitting (based on the GP prior) + * provides an additional method `sample_y(X)`, which evaluates samples + drawn from the GPR (prior or posterior) at given inputs + * exposes a method `log_marginal_likelihood(theta)`, which can be used + externally for other ways of selecting hyperparameters, e.g., via + Markov chain Monte Carlo. + + To learn the difference between a point-estimate approach vs. a more + Bayesian modelling approach, refer to the example entitled + :ref:`sphx_glr_auto_examples_gaussian_process_plot_compare_gpr_krr.py`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernel : kernel instance, default=None + The kernel specifying the covariance function of the GP. 
If None is + passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed") + * RBF(1.0, length_scale_bounds="fixed")`` is used as default. Note that + the kernel hyperparameters are optimized during fitting unless the + bounds are marked as "fixed". + + alpha : float or ndarray of shape (n_samples,), default=1e-10 + Value added to the diagonal of the kernel matrix during fitting. + This can prevent a potential numerical issue during fitting, by + ensuring that the calculated values form a positive definite matrix. + It can also be interpreted as the variance of additional Gaussian + measurement noise on the training observations. Note that this is + different from using a `WhiteKernel`. If an array is passed, it must + have the same number of entries as the data used for fitting and is + used as datapoint-dependent noise level. Allowing to specify the + noise level directly as a parameter is mainly for convenience and + for consistency with :class:`~sklearn.linear_model.Ridge`. + + optimizer : "fmin_l_bfgs_b", callable or None, default="fmin_l_bfgs_b" + Can either be one of the internally supported optimizers for optimizing + the kernel's parameters, specified by a string, or an externally + defined optimizer passed as a callable. If a callable is passed, it + must have the signature:: + + def optimizer(obj_func, initial_theta, bounds): + # * 'obj_func': the objective function to be minimized, which + # takes the hyperparameters theta as a parameter and an + # optional flag eval_gradient, which determines if the + # gradient is returned additionally to the function value + # * 'initial_theta': the initial value for theta, which can be + # used by local optimizers + # * 'bounds': the bounds on the values of theta + .... + # Returned are the best found hyperparameters theta and + # the corresponding value of the target function. + return theta_opt, func_min + + Per default, the L-BFGS-B algorithm from `scipy.optimize.minimize` + is used. If None is passed, the kernel's parameters are kept fixed. + Available internal optimizers are: `{'fmin_l_bfgs_b'}`. + + n_restarts_optimizer : int, default=0 + The number of restarts of the optimizer for finding the kernel's + parameters which maximize the log-marginal likelihood. The first run + of the optimizer is performed from the kernel's initial parameters, + the remaining ones (if any) from thetas sampled log-uniform randomly + from the space of allowed theta-values. If greater than 0, all bounds + must be finite. Note that `n_restarts_optimizer == 0` implies that one + run is performed. + + normalize_y : bool, default=False + Whether or not to normalize the target values `y` by removing the mean + and scaling to unit-variance. This is recommended for cases where + zero-mean, unit-variance priors are used. Note that, in this + implementation, the normalisation is reversed before the GP predictions + are reported. + + .. versionchanged:: 0.23 + + copy_X_train : bool, default=True + If True, a persistent copy of the training data is stored in the + object. Otherwise, just a reference to the training data is stored, + which might cause predictions to change if the data is modified + externally. + + n_targets : int, default=None + The number of dimensions of the target values. Used to decide the number + of outputs when sampling from the prior distributions (i.e. calling + :meth:`sample_y` before :meth:`fit`). This parameter is ignored once + :meth:`fit` has been called. + + .. 
versionadded:: 1.3 + + random_state : int, RandomState instance or None, default=None + Determines random number generation used to initialize the centers. + Pass an int for reproducible results across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + X_train_ : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data (also + required for prediction). + + y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values in training data (also required for prediction). + + kernel_ : kernel instance + The kernel used for prediction. The structure of the kernel is the + same as the one passed as parameter but with optimized hyperparameters. + + L_ : array-like of shape (n_samples, n_samples) + Lower-triangular Cholesky decomposition of the kernel in ``X_train_``. + + alpha_ : array-like of shape (n_samples,) + Dual coefficients of training data points in kernel space. + + log_marginal_likelihood_value_ : float + The log-marginal-likelihood of ``self.kernel_.theta``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GaussianProcessClassifier : Gaussian process classification (GPC) + based on Laplace approximation. + + References + ---------- + .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams, + "Gaussian Processes for Machine Learning", + MIT Press 2006 `_ + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = DotProduct() + WhiteKernel() + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.3680... + >>> gpr.predict(X[:2,:], return_std=True) + (array([653.0..., 592.1...]), array([316.6..., 316.6...])) + """ + + _parameter_constraints: dict = { + "kernel": [None, Kernel], + "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray], + "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None], + "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")], + "normalize_y": ["boolean"], + "copy_X_train": ["boolean"], + "n_targets": [Interval(Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + } + + def __init__( + self, + kernel=None, + *, + alpha=1e-10, + optimizer="fmin_l_bfgs_b", + n_restarts_optimizer=0, + normalize_y=False, + copy_X_train=True, + n_targets=None, + random_state=None, + ): + self.kernel = kernel + self.alpha = alpha + self.optimizer = optimizer + self.n_restarts_optimizer = n_restarts_optimizer + self.normalize_y = normalize_y + self.copy_X_train = copy_X_train + self.n_targets = n_targets + self.random_state = random_state + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit Gaussian process regression model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Feature vectors or other representations of training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. 
+ + Returns + ------- + self : object + GaussianProcessRegressor class instance. + """ + if self.kernel is None: # Use an RBF kernel as default + self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF( + 1.0, length_scale_bounds="fixed" + ) + else: + self.kernel_ = clone(self.kernel) + + self._rng = check_random_state(self.random_state) + + if self.kernel_.requires_vector_input: + dtype, ensure_2d = "numeric", True + else: + dtype, ensure_2d = None, False + X, y = validate_data( + self, + X, + y, + multi_output=True, + y_numeric=True, + ensure_2d=ensure_2d, + dtype=dtype, + ) + + n_targets_seen = y.shape[1] if y.ndim > 1 else 1 + if self.n_targets is not None and n_targets_seen != self.n_targets: + raise ValueError( + "The number of targets seen in `y` is different from the parameter " + f"`n_targets`. Got {n_targets_seen} != {self.n_targets}." + ) + + # Normalize target value + if self.normalize_y: + self._y_train_mean = np.mean(y, axis=0) + self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False) + + # Remove mean and make unit variance + y = (y - self._y_train_mean) / self._y_train_std + + else: + shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1 + self._y_train_mean = np.zeros(shape=shape_y_stats) + self._y_train_std = np.ones(shape=shape_y_stats) + + if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]: + if self.alpha.shape[0] == 1: + self.alpha = self.alpha[0] + else: + raise ValueError( + "alpha must be a scalar or an array with same number of " + f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})" + ) + + self.X_train_ = np.copy(X) if self.copy_X_train else X + self.y_train_ = np.copy(y) if self.copy_X_train else y + + if self.optimizer is not None and self.kernel_.n_dims > 0: + # Choose hyperparameters based on maximizing the log-marginal + # likelihood (potentially starting from several initial values) + def obj_func(theta, eval_gradient=True): + if eval_gradient: + lml, grad = self.log_marginal_likelihood( + theta, eval_gradient=True, clone_kernel=False + ) + return -lml, -grad + else: + return -self.log_marginal_likelihood(theta, clone_kernel=False) + + # First optimize starting from theta specified in kernel + optima = [ + ( + self._constrained_optimization( + obj_func, self.kernel_.theta, self.kernel_.bounds + ) + ) + ] + + # Additional runs are performed from log-uniform chosen initial + # theta + if self.n_restarts_optimizer > 0: + if not np.isfinite(self.kernel_.bounds).all(): + raise ValueError( + "Multiple optimizer restarts (n_restarts_optimizer>0) " + "requires that all bounds are finite." + ) + bounds = self.kernel_.bounds + for iteration in range(self.n_restarts_optimizer): + theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1]) + optima.append( + self._constrained_optimization(obj_func, theta_initial, bounds) + ) + # Select result from run with minimal (negative) log-marginal + # likelihood + lml_values = list(map(itemgetter(1), optima)) + self.kernel_.theta = optima[np.argmin(lml_values)][0] + self.kernel_._check_bounds_params() + + self.log_marginal_likelihood_value_ = -np.min(lml_values) + else: + self.log_marginal_likelihood_value_ = self.log_marginal_likelihood( + self.kernel_.theta, clone_kernel=False + ) + + # Precompute quantities required for predictions which are independent + # of actual query points + # Alg. 
2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I) + K = self.kernel_(self.X_train_) + K[np.diag_indices_from(K)] += self.alpha + try: + self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False) + except np.linalg.LinAlgError as exc: + exc.args = ( + ( + f"The kernel, {self.kernel_}, is not returning a positive " + "definite matrix. Try gradually increasing the 'alpha' " + "parameter of your GaussianProcessRegressor estimator." + ), + ) + exc.args + raise + # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y) + self.alpha_ = cho_solve( + (self.L_, GPR_CHOLESKY_LOWER), + self.y_train_, + check_finite=False, + ) + return self + + def predict(self, X, return_std=False, return_cov=False): + """Predict using the Gaussian process regression model. + + We can also predict based on an unfitted model by using the GP prior. + In addition to the mean of the predictive distribution, optionally also + returns its standard deviation (`return_std=True`) or covariance + (`return_cov=True`). Note that at most one of the two can be requested. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) or list of object + Query points where the GP is evaluated. + + return_std : bool, default=False + If True, the standard-deviation of the predictive distribution at + the query points is returned along with the mean. + + return_cov : bool, default=False + If True, the covariance of the joint predictive distribution at + the query points is returned along with the mean. + + Returns + ------- + y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets) + Mean of predictive distribution at query points. + + y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional + Standard deviation of predictive distribution at query points. + Only returned when `return_std` is True. + + y_cov : ndarray of shape (n_samples, n_samples) or \ + (n_samples, n_samples, n_targets), optional + Covariance of joint predictive distribution at query points. + Only returned when `return_cov` is True. + """ + if return_std and return_cov: + raise RuntimeError( + "At most one of return_std or return_cov can be requested." + ) + + if self.kernel is None or self.kernel.requires_vector_input: + dtype, ensure_2d = "numeric", True + else: + dtype, ensure_2d = None, False + + X = validate_data(self, X, ensure_2d=ensure_2d, dtype=dtype, reset=False) + + if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior + if self.kernel is None: + kernel = C(1.0, constant_value_bounds="fixed") * RBF( + 1.0, length_scale_bounds="fixed" + ) + else: + kernel = self.kernel + + n_targets = self.n_targets if self.n_targets is not None else 1 + y_mean = np.zeros(shape=(X.shape[0], n_targets)).squeeze() + + if return_cov: + y_cov = kernel(X) + if n_targets > 1: + y_cov = np.repeat( + np.expand_dims(y_cov, -1), repeats=n_targets, axis=-1 + ) + return y_mean, y_cov + elif return_std: + y_var = kernel.diag(X) + if n_targets > 1: + y_var = np.repeat( + np.expand_dims(y_var, -1), repeats=n_targets, axis=-1 + ) + return y_mean, np.sqrt(y_var) + else: + return y_mean + else: # Predict based on GP posterior + # Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . 
alpha + K_trans = self.kernel_(X, self.X_train_) + y_mean = K_trans @ self.alpha_ + + # undo normalisation + y_mean = self._y_train_std * y_mean + self._y_train_mean + + # if y_mean has shape (n_samples, 1), reshape to (n_samples,) + if y_mean.ndim > 1 and y_mean.shape[1] == 1: + y_mean = np.squeeze(y_mean, axis=1) + + # Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T + V = solve_triangular( + self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False + ) + + if return_cov: + # Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v + y_cov = self.kernel_(X) - V.T @ V + + # undo normalisation + y_cov = np.outer(y_cov, self._y_train_std**2).reshape(*y_cov.shape, -1) + # if y_cov has shape (n_samples, n_samples, 1), reshape to + # (n_samples, n_samples) + if y_cov.shape[2] == 1: + y_cov = np.squeeze(y_cov, axis=2) + + return y_mean, y_cov + elif return_std: + # Compute variance of predictive distribution + # Use einsum to avoid explicitly forming the large matrix + # V^T @ V just to extract its diagonal afterward. + y_var = self.kernel_.diag(X).copy() + y_var -= np.einsum("ij,ji->i", V.T, V) + + # Check if any of the variances is negative because of + # numerical issues. If yes: set the variance to 0. + y_var_negative = y_var < 0 + if np.any(y_var_negative): + warnings.warn( + "Predicted variances smaller than 0. " + "Setting those variances to 0." + ) + y_var[y_var_negative] = 0.0 + + # undo normalisation + y_var = np.outer(y_var, self._y_train_std**2).reshape(*y_var.shape, -1) + + # if y_var has shape (n_samples, 1), reshape to (n_samples,) + if y_var.shape[1] == 1: + y_var = np.squeeze(y_var, axis=1) + + return y_mean, np.sqrt(y_var) + else: + return y_mean + + def sample_y(self, X, n_samples=1, random_state=0): + """Draw samples from Gaussian process and evaluate at X. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Query points where the GP is evaluated. + + n_samples : int, default=1 + Number of samples drawn from the Gaussian process per query point. + + random_state : int, RandomState instance or None, default=0 + Determines random number generation to randomly draw samples. + Pass an int for reproducible results across multiple function + calls. + See :term:`Glossary `. + + Returns + ------- + y_samples : ndarray of shape (n_samples_X, n_samples), or \ + (n_samples_X, n_targets, n_samples) + Values of n_samples samples drawn from Gaussian process and + evaluated at query points. + """ + rng = check_random_state(random_state) + + y_mean, y_cov = self.predict(X, return_cov=True) + if y_mean.ndim == 1: + y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T + else: + y_samples = [ + rng.multivariate_normal( + y_mean[:, target], y_cov[..., target], n_samples + ).T[:, np.newaxis] + for target in range(y_mean.shape[1]) + ] + y_samples = np.hstack(y_samples) + return y_samples + + def log_marginal_likelihood( + self, theta=None, eval_gradient=False, clone_kernel=True + ): + """Return log-marginal likelihood of theta for training data. + + Parameters + ---------- + theta : array-like of shape (n_kernel_params,) default=None + Kernel hyperparameters for which the log-marginal likelihood is + evaluated. If None, the precomputed log_marginal_likelihood + of ``self.kernel_.theta`` is returned. + + eval_gradient : bool, default=False + If True, the gradient of the log-marginal likelihood with respect + to the kernel hyperparameters at position theta is returned + additionally. If True, theta must not be None. 
+ + clone_kernel : bool, default=True + If True, the kernel attribute is copied. If False, the kernel + attribute is modified, but may result in a performance improvement. + + Returns + ------- + log_likelihood : float + Log-marginal likelihood of theta for training data. + + log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional + Gradient of the log-marginal likelihood with respect to the kernel + hyperparameters at position theta. + Only returned when eval_gradient is True. + """ + if theta is None: + if eval_gradient: + raise ValueError("Gradient can only be evaluated for theta!=None") + return self.log_marginal_likelihood_value_ + + if clone_kernel: + kernel = self.kernel_.clone_with_theta(theta) + else: + kernel = self.kernel_ + kernel.theta = theta + + if eval_gradient: + K, K_gradient = kernel(self.X_train_, eval_gradient=True) + else: + K = kernel(self.X_train_) + + # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I) + K[np.diag_indices_from(K)] += self.alpha + try: + L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False) + except np.linalg.LinAlgError: + return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf + + # Support multi-dimensional output of self.y_train_ + y_train = self.y_train_ + if y_train.ndim == 1: + y_train = y_train[:, np.newaxis] + + # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y) + alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False) + + # Alg 2.1, page 19, line 7 + # -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi) + # y is originally thought to be a (1, n_samples) row vector. However, + # in multioutputs, y is of shape (n_samples, 2) and we need to compute + # y^T . alpha for each output, independently using einsum. Thus, it + # is equivalent to: + # for output_idx in range(n_outputs): + # log_likelihood_dims[output_idx] = ( + # y_train[:, [output_idx]] @ alpha[:, [output_idx]] + # ) + log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha) + log_likelihood_dims -= np.log(np.diag(L)).sum() + log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi) + # the log likehood is sum-up across the outputs + log_likelihood = log_likelihood_dims.sum(axis=-1) + + if eval_gradient: + # Eq. 5.9, p. 114, and footnote 5 in p. 114 + # 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient) + # alpha is supposed to be a vector of (n_samples,) elements. With + # multioutputs, alpha is a matrix of size (n_samples, n_outputs). + # Therefore, we want to construct a matrix of + # (n_samples, n_samples, n_outputs) equivalent to + # for output_idx in range(n_outputs): + # output_alpha = alpha[:, [output_idx]] + # inner_term[..., output_idx] = output_alpha @ output_alpha.T + inner_term = np.einsum("ik,jk->ijk", alpha, alpha) + # compute K^-1 of shape (n_samples, n_samples) + K_inv = cho_solve( + (L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False + ) + # create a new axis to use broadcasting between inner_term and + # K_inv + inner_term -= K_inv[..., np.newaxis] + # Since we are interested about the trace of + # inner_term @ K_gradient, we don't explicitly compute the + # matrix-by-matrix operation and instead use an einsum. 
Therefore + # it is equivalent to: + # for param_idx in range(n_kernel_params): + # for output_idx in range(n_output): + # log_likehood_gradient_dims[param_idx, output_idx] = ( + # inner_term[..., output_idx] @ + # K_gradient[..., param_idx] + # ) + log_likelihood_gradient_dims = 0.5 * np.einsum( + "ijl,jik->kl", inner_term, K_gradient + ) + # the log likehood gradient is the sum-up across the outputs + log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1) + + if eval_gradient: + return log_likelihood, log_likelihood_gradient + else: + return log_likelihood + + def _constrained_optimization(self, obj_func, initial_theta, bounds): + if self.optimizer == "fmin_l_bfgs_b": + opt_res = scipy.optimize.minimize( + obj_func, + initial_theta, + method="L-BFGS-B", + jac=True, + bounds=bounds, + ) + _check_optimize_result("lbfgs", opt_res) + theta_opt, func_min = opt_res.x, opt_res.fun + elif callable(self.optimizer): + theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds) + else: + raise ValueError(f"Unknown optimizer {self.optimizer}.") + + return theta_opt, func_min + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.requires_fit = False + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py new file mode 100644 index 0000000000000000000000000000000000000000..07db98d69289b5cc84f361e015dc7a36ebd6dc86 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/kernels.py @@ -0,0 +1,2410 @@ +"""A set of kernels that can be combined by operators and used in Gaussian processes.""" + +# Kernels for Gaussian process regression and classification. +# +# The kernels in this module allow kernel-engineering, i.e., they can be +# combined via the "+" and "*" operators or be exponentiated with a scalar +# via "**". These sum and product expressions can also contain scalar values, +# which are automatically converted to a constant kernel. +# +# All kernels allow (analytic) gradient-based hyperparameter optimization. +# The space of hyperparameters can be specified by giving lower und upper +# boundaries for the value of each hyperparameter (the search space is thus +# rectangular). Instead of specifying bounds, hyperparameters can also be +# declared to be "fixed", which causes these hyperparameters to be excluded from +# optimization. + + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +# Note: this module is strongly inspired by the kernel module of the george +# package. 
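+#
+# A minimal illustrative sketch of the kernel algebra described above (all
+# classes and properties used here are defined later in this module; the
+# variable names are only examples):
+#
+#     from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel
+#     k = ConstantKernel(2.0) * RBF(length_scale=1.5) + WhiteKernel(noise_level=0.1)
+#     k_sq = RBF(length_scale=1.0) ** 2   # exponentiation via "**"
+#     print(k.theta)                      # log-transformed free hyperparameters
+#     print(k.bounds)                     # log-transformed search bounds
+#
+# Plain scalars in such expressions are promoted to a ConstantKernel, so
+# `2.0 * RBF()` is equivalent to `ConstantKernel(2.0) * RBF()`.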
+ +import math +import warnings +from abc import ABCMeta, abstractmethod +from collections import namedtuple +from inspect import signature + +import numpy as np +from scipy.spatial.distance import cdist, pdist, squareform +from scipy.special import gamma, kv + +from ..base import clone +from ..exceptions import ConvergenceWarning +from ..metrics.pairwise import pairwise_kernels +from ..utils.validation import _num_samples + + +def _check_length_scale(X, length_scale): + length_scale = np.squeeze(length_scale).astype(float) + if np.ndim(length_scale) > 1: + raise ValueError("length_scale cannot be of dimension greater than 1") + if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]: + raise ValueError( + "Anisotropic kernel must have the same number of " + "dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1]) + ) + return length_scale + + +class Hyperparameter( + namedtuple( + "Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed") + ) +): + """A kernel hyperparameter's specification in form of a namedtuple. + + .. versionadded:: 0.18 + + Attributes + ---------- + name : str + The name of the hyperparameter. Note that a kernel using a + hyperparameter with name "x" must have the attributes self.x and + self.x_bounds + + value_type : str + The type of the hyperparameter. Currently, only "numeric" + hyperparameters are supported. + + bounds : pair of floats >= 0 or "fixed" + The lower and upper bound on the parameter. If n_elements>1, a pair + of 1d array with n_elements each may be given alternatively. If + the string "fixed" is passed as bounds, the hyperparameter's value + cannot be changed. + + n_elements : int, default=1 + The number of elements of the hyperparameter value. Defaults to 1, + which corresponds to a scalar hyperparameter. n_elements > 1 + corresponds to a hyperparameter which is vector-valued, + such as, e.g., anisotropic length-scales. + + fixed : bool, default=None + Whether the value of this hyperparameter is fixed, i.e., cannot be + changed during hyperparameter tuning. If None is passed, the "fixed" is + derived based on the given bounds. + + Examples + -------- + >>> from sklearn.gaussian_process.kernels import ConstantKernel + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import Hyperparameter + >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0) + >>> kernel = ConstantKernel(constant_value=1.0, + ... constant_value_bounds=(0.0, 10.0)) + + We can access each hyperparameter: + + >>> for hyperparameter in kernel.hyperparameters: + ... print(hyperparameter) + Hyperparameter(name='constant_value', value_type='numeric', + bounds=array([[ 0., 10.]]), n_elements=1, fixed=False) + + >>> params = kernel.get_params() + >>> for key in sorted(params): print(f"{key} : {params[key]}") + constant_value : 1.0 + constant_value_bounds : (0.0, 10.0) + """ + + # A raw namedtuple is very memory efficient as it packs the attributes + # in a struct to get rid of the __dict__ of attributes in particular it + # does not copy the string for the keys on each instance. + # By deriving a namedtuple class just to introduce the __init__ method we + # would also reintroduce the __dict__ on the instance. By telling the + # Python interpreter that this subclass uses static __slots__ instead of + # dynamic attributes. Furthermore we don't need any additional slot in the + # subclass so we set __slots__ to the empty tuple. 
+ __slots__ = () + + def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None): + if not isinstance(bounds, str) or bounds != "fixed": + bounds = np.atleast_2d(bounds) + if n_elements > 1: # vector-valued parameter + if bounds.shape[0] == 1: + bounds = np.repeat(bounds, n_elements, 0) + elif bounds.shape[0] != n_elements: + raise ValueError( + "Bounds on %s should have either 1 or " + "%d dimensions. Given are %d" + % (name, n_elements, bounds.shape[0]) + ) + + if fixed is None: + fixed = isinstance(bounds, str) and bounds == "fixed" + return super(Hyperparameter, cls).__new__( + cls, name, value_type, bounds, n_elements, fixed + ) + + # This is mainly a testing utility to check that two hyperparameters + # are equal. + def __eq__(self, other): + return ( + self.name == other.name + and self.value_type == other.value_type + and np.all(self.bounds == other.bounds) + and self.n_elements == other.n_elements + and self.fixed == other.fixed + ) + + +class Kernel(metaclass=ABCMeta): + """Base class for all kernels. + + .. versionadded:: 0.18 + + Examples + -------- + >>> from sklearn.gaussian_process.kernels import Kernel, RBF + >>> import numpy as np + >>> class CustomKernel(Kernel): + ... def __init__(self, length_scale=1.0): + ... self.length_scale = length_scale + ... def __call__(self, X, Y=None): + ... if Y is None: + ... Y = X + ... return np.inner(X, X if Y is None else Y) ** 2 + ... def diag(self, X): + ... return np.ones(X.shape[0]) + ... def is_stationary(self): + ... return True + >>> kernel = CustomKernel(length_scale=2.0) + >>> X = np.array([[1, 2], [3, 4]]) + >>> print(kernel(X)) + [[ 25 121] + [121 625]] + """ + + def get_params(self, deep=True): + """Get parameters of this kernel. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + params = dict() + + # introspect the constructor arguments to find the model parameters + # to represent + cls = self.__class__ + init = getattr(cls.__init__, "deprecated_original", cls.__init__) + init_sign = signature(init) + args, varargs = [], [] + for parameter in init_sign.parameters.values(): + if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self": + args.append(parameter.name) + if parameter.kind == parameter.VAR_POSITIONAL: + varargs.append(parameter.name) + + if len(varargs) != 0: + raise RuntimeError( + "scikit-learn kernels should always " + "specify their parameters in the signature" + " of their __init__ (no varargs)." + " %s doesn't follow this convention." % (cls,) + ) + for arg in args: + params[arg] = getattr(self, arg) + + return params + + def set_params(self, **params): + """Set the parameters of this kernel. + + The method works on simple kernels as well as on nested kernels. + The latter have parameters of the form ``__`` + so that it's possible to update each component of a nested object. + + Returns + ------- + self + """ + if not params: + # Simple optimisation to gain speed (inspect is slow) + return self + valid_params = self.get_params(deep=True) + for key, value in params.items(): + split = key.split("__", 1) + if len(split) > 1: + # nested objects case + name, sub_name = split + if name not in valid_params: + raise ValueError( + "Invalid parameter %s for kernel %s. " + "Check the list of available parameters " + "with `kernel.get_params().keys()`." 
% (name, self) + ) + sub_object = valid_params[name] + sub_object.set_params(**{sub_name: value}) + else: + # simple objects case + if key not in valid_params: + raise ValueError( + "Invalid parameter %s for kernel %s. " + "Check the list of available parameters " + "with `kernel.get_params().keys()`." + % (key, self.__class__.__name__) + ) + setattr(self, key, value) + return self + + def clone_with_theta(self, theta): + """Returns a clone of self with given hyperparameters theta. + + Parameters + ---------- + theta : ndarray of shape (n_dims,) + The hyperparameters + """ + cloned = clone(self) + cloned.theta = theta + return cloned + + @property + def n_dims(self): + """Returns the number of non-fixed hyperparameters of the kernel.""" + return self.theta.shape[0] + + @property + def hyperparameters(self): + """Returns a list of all hyperparameter specifications.""" + r = [ + getattr(self, attr) + for attr in dir(self) + if attr.startswith("hyperparameter_") + ] + return r + + @property + def theta(self): + """Returns the (flattened, log-transformed) non-fixed hyperparameters. + + Note that theta are typically the log-transformed values of the + kernel's hyperparameters as this representation of the search space + is more amenable for hyperparameter search, as hyperparameters like + length-scales naturally live on a log-scale. + + Returns + ------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + theta = [] + params = self.get_params() + for hyperparameter in self.hyperparameters: + if not hyperparameter.fixed: + theta.append(params[hyperparameter.name]) + if len(theta) > 0: + return np.log(np.hstack(theta)) + else: + return np.array([]) + + @theta.setter + def theta(self, theta): + """Sets the (flattened, log-transformed) non-fixed hyperparameters. + + Parameters + ---------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + params = self.get_params() + i = 0 + for hyperparameter in self.hyperparameters: + if hyperparameter.fixed: + continue + if hyperparameter.n_elements > 1: + # vector-valued parameter + params[hyperparameter.name] = np.exp( + theta[i : i + hyperparameter.n_elements] + ) + i += hyperparameter.n_elements + else: + params[hyperparameter.name] = np.exp(theta[i]) + i += 1 + + if i != len(theta): + raise ValueError( + "theta has not the correct number of entries." + " Should be %d; given are %d" % (i, len(theta)) + ) + self.set_params(**params) + + @property + def bounds(self): + """Returns the log-transformed bounds on the theta. 
+ + Returns + ------- + bounds : ndarray of shape (n_dims, 2) + The log-transformed bounds on the kernel's hyperparameters theta + """ + bounds = [ + hyperparameter.bounds + for hyperparameter in self.hyperparameters + if not hyperparameter.fixed + ] + if len(bounds) > 0: + return np.log(np.vstack(bounds)) + else: + return np.array([]) + + def __add__(self, b): + if not isinstance(b, Kernel): + return Sum(self, ConstantKernel(b)) + return Sum(self, b) + + def __radd__(self, b): + if not isinstance(b, Kernel): + return Sum(ConstantKernel(b), self) + return Sum(b, self) + + def __mul__(self, b): + if not isinstance(b, Kernel): + return Product(self, ConstantKernel(b)) + return Product(self, b) + + def __rmul__(self, b): + if not isinstance(b, Kernel): + return Product(ConstantKernel(b), self) + return Product(b, self) + + def __pow__(self, b): + return Exponentiation(self, b) + + def __eq__(self, b): + if type(self) != type(b): + return False + params_a = self.get_params() + params_b = b.get_params() + for key in set(list(params_a.keys()) + list(params_b.keys())): + if np.any(params_a.get(key, None) != params_b.get(key, None)): + return False + return True + + def __repr__(self): + return "{0}({1})".format( + self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta)) + ) + + @abstractmethod + def __call__(self, X, Y=None, eval_gradient=False): + """Evaluate the kernel.""" + + @abstractmethod + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples,) + Left argument of the returned kernel k(X, Y) + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + + @abstractmethod + def is_stationary(self): + """Returns whether the kernel is stationary.""" + + @property + def requires_vector_input(self): + """Returns whether the kernel is defined on fixed-length feature + vectors or generic objects. Defaults to True for backward + compatibility.""" + return True + + def _check_bounds_params(self): + """Called after fitting to warn if bounds may have been too tight.""" + list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T) + idx = 0 + for hyp in self.hyperparameters: + if hyp.fixed: + continue + for dim in range(hyp.n_elements): + if list_close[idx, 0]: + warnings.warn( + "The optimal value found for " + "dimension %s of parameter %s is " + "close to the specified lower " + "bound %s. Decreasing the bound and" + " calling fit again may find a " + "better value." % (dim, hyp.name, hyp.bounds[dim][0]), + ConvergenceWarning, + ) + elif list_close[idx, 1]: + warnings.warn( + "The optimal value found for " + "dimension %s of parameter %s is " + "close to the specified upper " + "bound %s. Increasing the bound and" + " calling fit again may find a " + "better value." % (dim, hyp.name, hyp.bounds[dim][1]), + ConvergenceWarning, + ) + idx += 1 + + +class NormalizedKernelMixin: + """Mixin for kernels which are normalized: k(X, X)=1. + + .. versionadded:: 0.18 + """ + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. 
+ + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return np.ones(X.shape[0]) + + +class StationaryKernelMixin: + """Mixin for kernels which are stationary: k(X, Y)= f(X-Y). + + .. versionadded:: 0.18 + """ + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return True + + +class GenericKernelMixin: + """Mixin for kernels which operate on generic objects such as variable- + length sequences, trees, and graphs. + + .. versionadded:: 0.22 + """ + + @property + def requires_vector_input(self): + """Whether the kernel works only on fixed-length feature vectors.""" + return False + + +class CompoundKernel(Kernel): + """Kernel which is composed of a set of other kernels. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernels : list of Kernels + The other kernels + + Examples + -------- + >>> from sklearn.gaussian_process.kernels import WhiteKernel + >>> from sklearn.gaussian_process.kernels import RBF + >>> from sklearn.gaussian_process.kernels import CompoundKernel + >>> kernel = CompoundKernel( + ... [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)]) + >>> print(kernel.bounds) + [[-11.51292546 11.51292546] + [-11.51292546 11.51292546]] + >>> print(kernel.n_dims) + 2 + >>> print(kernel.theta) + [1.09861229 0.69314718] + """ + + def __init__(self, kernels): + self.kernels = kernels + + def get_params(self, deep=True): + """Get parameters of this kernel. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + return dict(kernels=self.kernels) + + @property + def theta(self): + """Returns the (flattened, log-transformed) non-fixed hyperparameters. + + Note that theta are typically the log-transformed values of the + kernel's hyperparameters as this representation of the search space + is more amenable for hyperparameter search, as hyperparameters like + length-scales naturally live on a log-scale. + + Returns + ------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + return np.hstack([kernel.theta for kernel in self.kernels]) + + @theta.setter + def theta(self, theta): + """Sets the (flattened, log-transformed) non-fixed hyperparameters. + + Parameters + ---------- + theta : array of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + k_dims = self.k1.n_dims + for i, kernel in enumerate(self.kernels): + kernel.theta = theta[i * k_dims : (i + 1) * k_dims] + + @property + def bounds(self): + """Returns the log-transformed bounds on the theta. + + Returns + ------- + bounds : array of shape (n_dims, 2) + The log-transformed bounds on the kernel's hyperparameters theta + """ + return np.vstack([kernel.bounds for kernel in self.kernels]) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Note that this compound kernel returns the results of all simple kernel + stacked along an additional axis. 
+ + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object, \ + default=None + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_X, n_features) or list of object, \ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of the + kernel hyperparameter is computed. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels) + Kernel k(X, Y) + + K_gradient : ndarray of shape \ + (n_samples_X, n_samples_X, n_dims, n_kernels), optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + if eval_gradient: + K = [] + K_grad = [] + for kernel in self.kernels: + K_single, K_grad_single = kernel(X, Y, eval_gradient) + K.append(K_single) + K_grad.append(K_grad_single[..., np.newaxis]) + return np.dstack(K), np.concatenate(K_grad, 3) + else: + return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels]) + + def __eq__(self, b): + if type(self) != type(b) or len(self.kernels) != len(b.kernels): + return False + return np.all( + [self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))] + ) + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return np.all([kernel.is_stationary() for kernel in self.kernels]) + + @property + def requires_vector_input(self): + """Returns whether the kernel is defined on discrete structures.""" + return np.any([kernel.requires_vector_input for kernel in self.kernels]) + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to `np.diag(self(X))`; however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X, n_kernels) + Diagonal of kernel k(X, X) + """ + return np.vstack([kernel.diag(X) for kernel in self.kernels]).T + + +class KernelOperator(Kernel): + """Base class for all kernel operators. + + .. versionadded:: 0.18 + """ + + def __init__(self, k1, k2): + self.k1 = k1 + self.k2 = k2 + + def get_params(self, deep=True): + """Get parameters of this kernel. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. 
+ """ + params = dict(k1=self.k1, k2=self.k2) + if deep: + deep_items = self.k1.get_params().items() + params.update(("k1__" + k, val) for k, val in deep_items) + deep_items = self.k2.get_params().items() + params.update(("k2__" + k, val) for k, val in deep_items) + + return params + + @property + def hyperparameters(self): + """Returns a list of all hyperparameter.""" + r = [ + Hyperparameter( + "k1__" + hyperparameter.name, + hyperparameter.value_type, + hyperparameter.bounds, + hyperparameter.n_elements, + ) + for hyperparameter in self.k1.hyperparameters + ] + + for hyperparameter in self.k2.hyperparameters: + r.append( + Hyperparameter( + "k2__" + hyperparameter.name, + hyperparameter.value_type, + hyperparameter.bounds, + hyperparameter.n_elements, + ) + ) + return r + + @property + def theta(self): + """Returns the (flattened, log-transformed) non-fixed hyperparameters. + + Note that theta are typically the log-transformed values of the + kernel's hyperparameters as this representation of the search space + is more amenable for hyperparameter search, as hyperparameters like + length-scales naturally live on a log-scale. + + Returns + ------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + return np.append(self.k1.theta, self.k2.theta) + + @theta.setter + def theta(self, theta): + """Sets the (flattened, log-transformed) non-fixed hyperparameters. + + Parameters + ---------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + k1_dims = self.k1.n_dims + self.k1.theta = theta[:k1_dims] + self.k2.theta = theta[k1_dims:] + + @property + def bounds(self): + """Returns the log-transformed bounds on the theta. + + Returns + ------- + bounds : ndarray of shape (n_dims, 2) + The log-transformed bounds on the kernel's hyperparameters theta + """ + if self.k1.bounds.size == 0: + return self.k2.bounds + if self.k2.bounds.size == 0: + return self.k1.bounds + return np.vstack((self.k1.bounds, self.k2.bounds)) + + def __eq__(self, b): + if type(self) != type(b): + return False + return (self.k1 == b.k1 and self.k2 == b.k2) or ( + self.k1 == b.k2 and self.k2 == b.k1 + ) + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return self.k1.is_stationary() and self.k2.is_stationary() + + @property + def requires_vector_input(self): + """Returns whether the kernel is stationary.""" + return self.k1.requires_vector_input or self.k2.requires_vector_input + + +class Sum(KernelOperator): + """The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2` + and combines them via + + .. math:: + k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y) + + Note that the `__add__` magic method is overridden, so + `Sum(RBF(), RBF())` is equivalent to using the + operator + with `RBF() + RBF()`. + + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + k1 : Kernel + The first base-kernel of the sum-kernel + + k2 : Kernel + The second base-kernel of the sum-kernel + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = Sum(ConstantKernel(2), RBF()) + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... 
random_state=0).fit(X, y) + >>> gpr.score(X, y) + 1.0 + >>> kernel + 1.41**2 + RBF(length_scale=1) + """ + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_X, n_features) or list of object,\ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + if eval_gradient: + K1, K1_gradient = self.k1(X, Y, eval_gradient=True) + K2, K2_gradient = self.k2(X, Y, eval_gradient=True) + return K1 + K2, np.dstack((K1_gradient, K2_gradient)) + else: + return self.k1(X, Y) + self.k2(X, Y) + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to `np.diag(self(X))`; however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return self.k1.diag(X) + self.k2.diag(X) + + def __repr__(self): + return "{0} + {1}".format(self.k1, self.k2) + + +class Product(KernelOperator): + """The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2` + and combines them via + + .. math:: + k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y) + + Note that the `__mul__` magic method is overridden, so + `Product(RBF(), RBF())` is equivalent to using the * operator + with `RBF() * RBF()`. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + k1 : Kernel + The first base-kernel of the product-kernel + + k2 : Kernel + The second base-kernel of the product-kernel + + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import (RBF, Product, + ... ConstantKernel) + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = Product(ConstantKernel(2), RBF()) + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 1.0 + >>> kernel + 1.41**2 * RBF(length_scale=1) + """ + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_Y, n_features) or list of object,\ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. 
+ + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + if eval_gradient: + K1, K1_gradient = self.k1(X, Y, eval_gradient=True) + K2, K2_gradient = self.k2(X, Y, eval_gradient=True) + return K1 * K2, np.dstack( + (K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis]) + ) + else: + return self.k1(X, Y) * self.k2(X, Y) + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return self.k1.diag(X) * self.k2.diag(X) + + def __repr__(self): + return "{0} * {1}".format(self.k1, self.k2) + + +class Exponentiation(Kernel): + """The Exponentiation kernel takes one base kernel and a scalar parameter + :math:`p` and combines them via + + .. math:: + k_{exp}(X, Y) = k(X, Y) ^p + + Note that the `__pow__` magic method is overridden, so + `Exponentiation(RBF(), 2)` is equivalent to using the ** operator + with `RBF() ** 2`. + + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + kernel : Kernel + The base kernel + + exponent : float + The exponent for the base kernel + + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import (RationalQuadratic, + ... Exponentiation) + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = Exponentiation(RationalQuadratic(), exponent=2) + >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.419... + >>> gpr.predict(X[:1,:], return_std=True) + (array([635.5...]), array([0.559...])) + """ + + def __init__(self, kernel, exponent): + self.kernel = kernel + self.exponent = exponent + + def get_params(self, deep=True): + """Get parameters of this kernel. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + params = dict(kernel=self.kernel, exponent=self.exponent) + if deep: + deep_items = self.kernel.get_params().items() + params.update(("kernel__" + k, val) for k, val in deep_items) + return params + + @property + def hyperparameters(self): + """Returns a list of all hyperparameter.""" + r = [] + for hyperparameter in self.kernel.hyperparameters: + r.append( + Hyperparameter( + "kernel__" + hyperparameter.name, + hyperparameter.value_type, + hyperparameter.bounds, + hyperparameter.n_elements, + ) + ) + return r + + @property + def theta(self): + """Returns the (flattened, log-transformed) non-fixed hyperparameters. 
+ + Note that theta are typically the log-transformed values of the + kernel's hyperparameters as this representation of the search space + is more amenable for hyperparameter search, as hyperparameters like + length-scales naturally live on a log-scale. + + Returns + ------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + return self.kernel.theta + + @theta.setter + def theta(self, theta): + """Sets the (flattened, log-transformed) non-fixed hyperparameters. + + Parameters + ---------- + theta : ndarray of shape (n_dims,) + The non-fixed, log-transformed hyperparameters of the kernel + """ + self.kernel.theta = theta + + @property + def bounds(self): + """Returns the log-transformed bounds on the theta. + + Returns + ------- + bounds : ndarray of shape (n_dims, 2) + The log-transformed bounds on the kernel's hyperparameters theta + """ + return self.kernel.bounds + + def __eq__(self, b): + if type(self) != type(b): + return False + return self.kernel == b.kernel and self.exponent == b.exponent + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_Y, n_features) or list of object,\ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + if eval_gradient: + K, K_gradient = self.kernel(X, Y, eval_gradient=True) + K_gradient *= self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1) + return K**self.exponent, K_gradient + else: + K = self.kernel(X, Y, eval_gradient=False) + return K**self.exponent + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return self.kernel.diag(X) ** self.exponent + + def __repr__(self): + return "{0} ** {1}".format(self.kernel, self.exponent) + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return self.kernel.is_stationary() + + @property + def requires_vector_input(self): + """Returns whether the kernel is defined on discrete structures.""" + return self.kernel.requires_vector_input + + +class ConstantKernel(StationaryKernelMixin, GenericKernelMixin, Kernel): + """Constant kernel. + + Can be used as part of a product-kernel where it scales the magnitude of + the other factor (kernel) or as part of a sum-kernel, where it modifies + the mean of the Gaussian process. + + .. 
math:: + k(x_1, x_2) = constant\\_value \\;\\forall\\; x_1, x_2 + + Adding a constant kernel is equivalent to adding a constant:: + + kernel = RBF() + ConstantKernel(constant_value=2) + + is the same as:: + + kernel = RBF() + 2 + + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + constant_value : float, default=1.0 + The constant value which defines the covariance: + k(x_1, x_2) = constant_value + + constant_value_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on `constant_value`. + If set to "fixed", `constant_value` cannot be changed during + hyperparameter tuning. + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import RBF, ConstantKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = RBF() + ConstantKernel(constant_value=2) + >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.3696... + >>> gpr.predict(X[:1,:], return_std=True) + (array([606.1...]), array([0.24...])) + """ + + def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)): + self.constant_value = constant_value + self.constant_value_bounds = constant_value_bounds + + @property + def hyperparameter_constant_value(self): + return Hyperparameter("constant_value", "numeric", self.constant_value_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_X, n_features) or list of object, \ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when eval_gradient + is True. + """ + if Y is None: + Y = X + elif eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + + K = np.full( + (_num_samples(X), _num_samples(Y)), + self.constant_value, + dtype=np.array(self.constant_value).dtype, + ) + if eval_gradient: + if not self.hyperparameter_constant_value.fixed: + return ( + K, + np.full( + (_num_samples(X), _num_samples(X), 1), + self.constant_value, + dtype=np.array(self.constant_value).dtype, + ), + ) + else: + return K, np.empty((_num_samples(X), _num_samples(X), 0)) + else: + return K + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. 
+ + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return np.full( + _num_samples(X), + self.constant_value, + dtype=np.array(self.constant_value).dtype, + ) + + def __repr__(self): + return "{0:.3g}**2".format(np.sqrt(self.constant_value)) + + +class WhiteKernel(StationaryKernelMixin, GenericKernelMixin, Kernel): + """White kernel. + + The main use-case of this kernel is as part of a sum-kernel where it + explains the noise of the signal as independently and identically + normally-distributed. The parameter noise_level equals the variance of this + noise. + + .. math:: + k(x_1, x_2) = noise\\_level \\text{ if } x_i == x_j \\text{ else } 0 + + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + noise_level : float, default=1.0 + Parameter controlling the noise level (variance) + + noise_level_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'noise_level'. + If set to "fixed", 'noise_level' cannot be changed during + hyperparameter tuning. + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = DotProduct() + WhiteKernel(noise_level=0.5) + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.3680... + >>> gpr.predict(X[:2,:], return_std=True) + (array([653.0..., 592.1... ]), array([316.6..., 316.6...])) + """ + + def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)): + self.noise_level = noise_level + self.noise_level_bounds = noise_level_bounds + + @property + def hyperparameter_noise_level(self): + return Hyperparameter("noise_level", "numeric", self.noise_level_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Left argument of the returned kernel k(X, Y) + + Y : array-like of shape (n_samples_X, n_features) or list of object,\ + default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + is evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when eval_gradient + is True. + """ + if Y is not None and eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + + if Y is None: + K = self.noise_level * np.eye(_num_samples(X)) + if eval_gradient: + if not self.hyperparameter_noise_level.fixed: + return ( + K, + self.noise_level * np.eye(_num_samples(X))[:, :, np.newaxis], + ) + else: + return K, np.empty((_num_samples(X), _num_samples(X), 0)) + else: + return K + else: + return np.zeros((_num_samples(X), _num_samples(Y))) + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). 
+ + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : array-like of shape (n_samples_X, n_features) or list of object + Argument to the kernel. + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return np.full( + _num_samples(X), self.noise_level, dtype=np.array(self.noise_level).dtype + ) + + def __repr__(self): + return "{0}(noise_level={1:.3g})".format( + self.__class__.__name__, self.noise_level + ) + + +class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel): + """Radial basis function kernel (aka squared-exponential kernel). + + The RBF kernel is a stationary kernel. It is also known as the + "squared exponential" kernel. It is parameterized by a length scale + parameter :math:`l>0`, which can either be a scalar (isotropic variant + of the kernel) or a vector with the same number of dimensions as the inputs + X (anisotropic variant of the kernel). The kernel is given by: + + .. math:: + k(x_i, x_j) = \\exp\\left(- \\frac{d(x_i, x_j)^2}{2l^2} \\right) + + where :math:`l` is the length scale of the kernel and + :math:`d(\\cdot,\\cdot)` is the Euclidean distance. + For advice on how to set the length scale parameter, see e.g. [1]_. + + This kernel is infinitely differentiable, which implies that GPs with this + kernel as covariance function have mean square derivatives of all orders, + and are thus very smooth. + See [2]_, Chapter 4, Section 4.2, for further details of the RBF kernel. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + length_scale : float or ndarray of shape (n_features,), default=1.0 + The length scale of the kernel. If a float, an isotropic kernel is + used. If an array, an anisotropic kernel is used where each dimension + of l defines the length-scale of the respective feature dimension. + + length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'length_scale'. + If set to "fixed", 'length_scale' cannot be changed during + hyperparameter tuning. + + References + ---------- + .. [1] `David Duvenaud (2014). "The Kernel Cookbook: + Advice on Covariance functions". + `_ + + .. [2] `Carl Edward Rasmussen, Christopher K. I. Williams (2006). + "Gaussian Processes for Machine Learning". The MIT Press. + `_ + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import RBF + >>> X, y = load_iris(return_X_y=True) + >>> kernel = 1.0 * RBF(1.0) + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9866... 
+ >>> gpc.predict_proba(X[:2,:]) + array([[0.8354..., 0.03228..., 0.1322...], + [0.7906..., 0.0652..., 0.1441...]]) + """ + + def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)): + self.length_scale = length_scale + self.length_scale_bounds = length_scale_bounds + + @property + def anisotropic(self): + return np.iterable(self.length_scale) and len(self.length_scale) > 1 + + @property + def hyperparameter_length_scale(self): + if self.anisotropic: + return Hyperparameter( + "length_scale", + "numeric", + self.length_scale_bounds, + len(self.length_scale), + ) + return Hyperparameter("length_scale", "numeric", self.length_scale_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + X = np.atleast_2d(X) + length_scale = _check_length_scale(X, self.length_scale) + if Y is None: + dists = pdist(X / length_scale, metric="sqeuclidean") + K = np.exp(-0.5 * dists) + # convert from upper-triangular matrix to square matrix + K = squareform(K) + np.fill_diagonal(K, 1) + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + dists = cdist(X / length_scale, Y / length_scale, metric="sqeuclidean") + K = np.exp(-0.5 * dists) + + if eval_gradient: + if self.hyperparameter_length_scale.fixed: + # Hyperparameter l kept fixed + return K, np.empty((X.shape[0], X.shape[0], 0)) + elif not self.anisotropic or length_scale.shape[0] == 1: + K_gradient = (K * squareform(dists))[:, :, np.newaxis] + return K, K_gradient + elif self.anisotropic: + # We need to recompute the pairwise dimension-wise distances + K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / ( + length_scale**2 + ) + K_gradient *= K[..., np.newaxis] + return K, K_gradient + else: + return K + + def __repr__(self): + if self.anisotropic: + return "{0}(length_scale=[{1}])".format( + self.__class__.__name__, + ", ".join(map("{0:.3g}".format, self.length_scale)), + ) + else: # isotropic + return "{0}(length_scale={1:.3g})".format( + self.__class__.__name__, np.ravel(self.length_scale)[0] + ) + + +class Matern(RBF): + """Matern kernel. + + The class of Matern kernels is a generalization of the :class:`RBF`. + It has an additional parameter :math:`\\nu` which controls the + smoothness of the resulting function. The smaller :math:`\\nu`, + the less smooth the approximated function is. + As :math:`\\nu\\rightarrow\\infty`, the kernel becomes equivalent to + the :class:`RBF` kernel. When :math:`\\nu = 1/2`, the Matérn kernel + becomes identical to the absolute exponential kernel. + Important intermediate values are + :math:`\\nu=1.5` (once differentiable functions) + and :math:`\\nu=2.5` (twice differentiable functions). + + The kernel is given by: + + .. 
math:: + k(x_i, x_j) = \\frac{1}{\\Gamma(\\nu)2^{\\nu-1}}\\Bigg( + \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j ) + \\Bigg)^\\nu K_\\nu\\Bigg( + \\frac{\\sqrt{2\\nu}}{l} d(x_i , x_j )\\Bigg) + + + + where :math:`d(\\cdot,\\cdot)` is the Euclidean distance, + :math:`K_{\\nu}(\\cdot)` is a modified Bessel function and + :math:`\\Gamma(\\cdot)` is the gamma function. + See [1]_, Chapter 4, Section 4.2, for details regarding the different + variants of the Matern kernel. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + length_scale : float or ndarray of shape (n_features,), default=1.0 + The length scale of the kernel. If a float, an isotropic kernel is + used. If an array, an anisotropic kernel is used where each dimension + of l defines the length-scale of the respective feature dimension. + + length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'length_scale'. + If set to "fixed", 'length_scale' cannot be changed during + hyperparameter tuning. + + nu : float, default=1.5 + The parameter nu controlling the smoothness of the learned function. + The smaller nu, the less smooth the approximated function is. + For nu=inf, the kernel becomes equivalent to the RBF kernel and for + nu=0.5 to the absolute exponential kernel. Important intermediate + values are nu=1.5 (once differentiable functions) and nu=2.5 + (twice differentiable functions). Note that values of nu not in + [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost + (appr. 10 times higher) since they require to evaluate the modified + Bessel function. Furthermore, in contrast to l, nu is kept fixed to + its initial value and not optimized. + + References + ---------- + .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006). + "Gaussian Processes for Machine Learning". The MIT Press. + `_ + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import Matern + >>> X, y = load_iris(return_X_y=True) + >>> kernel = 1.0 * Matern(length_scale=1.0, nu=1.5) + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9866... + >>> gpc.predict_proba(X[:2,:]) + array([[0.8513..., 0.0368..., 0.1117...], + [0.8086..., 0.0693..., 0.1220...]]) + """ + + def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5): + super().__init__(length_scale, length_scale_bounds) + self.nu = nu + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. 
+ """ + X = np.atleast_2d(X) + length_scale = _check_length_scale(X, self.length_scale) + if Y is None: + dists = pdist(X / length_scale, metric="euclidean") + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + dists = cdist(X / length_scale, Y / length_scale, metric="euclidean") + + if self.nu == 0.5: + K = np.exp(-dists) + elif self.nu == 1.5: + K = dists * math.sqrt(3) + K = (1.0 + K) * np.exp(-K) + elif self.nu == 2.5: + K = dists * math.sqrt(5) + K = (1.0 + K + K**2 / 3.0) * np.exp(-K) + elif self.nu == np.inf: + K = np.exp(-(dists**2) / 2.0) + else: # general case; expensive to evaluate + K = dists + K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan + tmp = math.sqrt(2 * self.nu) * K + K.fill((2 ** (1.0 - self.nu)) / gamma(self.nu)) + K *= tmp**self.nu + K *= kv(self.nu, tmp) + + if Y is None: + # convert from upper-triangular matrix to square matrix + K = squareform(K) + np.fill_diagonal(K, 1) + + if eval_gradient: + if self.hyperparameter_length_scale.fixed: + # Hyperparameter l kept fixed + K_gradient = np.empty((X.shape[0], X.shape[0], 0)) + return K, K_gradient + + # We need to recompute the pairwise dimension-wise distances + if self.anisotropic: + D = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 / (length_scale**2) + else: + D = squareform(dists**2)[:, :, np.newaxis] + + if self.nu == 0.5: + denominator = np.sqrt(D.sum(axis=2))[:, :, np.newaxis] + divide_result = np.zeros_like(D) + np.divide( + D, + denominator, + out=divide_result, + where=denominator != 0, + ) + K_gradient = K[..., np.newaxis] * divide_result + elif self.nu == 1.5: + K_gradient = 3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis] + elif self.nu == 2.5: + tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis] + K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp) + elif self.nu == np.inf: + K_gradient = D * K[..., np.newaxis] + else: + # approximate gradient numerically + def f(theta): # helper function + return self.clone_with_theta(theta)(X, Y) + + return K, _approx_fprime(self.theta, f, 1e-10) + + if not self.anisotropic: + return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis] + else: + return K, K_gradient + else: + return K + + def __repr__(self): + if self.anisotropic: + return "{0}(length_scale=[{1}], nu={2:.3g})".format( + self.__class__.__name__, + ", ".join(map("{0:.3g}".format, self.length_scale)), + self.nu, + ) + else: + return "{0}(length_scale={1:.3g}, nu={2:.3g})".format( + self.__class__.__name__, np.ravel(self.length_scale)[0], self.nu + ) + + +class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel): + """Rational Quadratic kernel. + + The RationalQuadratic kernel can be seen as a scale mixture (an infinite + sum) of RBF kernels with different characteristic length scales. It is + parameterized by a length scale parameter :math:`l>0` and a scale + mixture parameter :math:`\\alpha>0`. Only the isotropic variant + where length_scale :math:`l` is a scalar is supported at the moment. + The kernel is given by: + + .. math:: + k(x_i, x_j) = \\left( + 1 + \\frac{d(x_i, x_j)^2 }{ 2\\alpha l^2}\\right)^{-\\alpha} + + where :math:`\\alpha` is the scale mixture parameter, :math:`l` is + the length scale of the kernel and :math:`d(\\cdot,\\cdot)` is the + Euclidean distance. + For advice on how to set the parameters, see e.g. [1]_. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + length_scale : float > 0, default=1.0 + The length scale of the kernel. 
+ + alpha : float > 0, default=1.0 + Scale mixture parameter + + length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'length_scale'. + If set to "fixed", 'length_scale' cannot be changed during + hyperparameter tuning. + + alpha_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'alpha'. + If set to "fixed", 'alpha' cannot be changed during + hyperparameter tuning. + + References + ---------- + .. [1] `David Duvenaud (2014). "The Kernel Cookbook: + Advice on Covariance functions". + `_ + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import RationalQuadratic + >>> X, y = load_iris(return_X_y=True) + >>> kernel = RationalQuadratic(length_scale=1.0, alpha=1.5) + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9733... + >>> gpc.predict_proba(X[:2,:]) + array([[0.8881..., 0.0566..., 0.05518...], + [0.8678..., 0.0707... , 0.0614...]]) + """ + + def __init__( + self, + length_scale=1.0, + alpha=1.0, + length_scale_bounds=(1e-5, 1e5), + alpha_bounds=(1e-5, 1e5), + ): + self.length_scale = length_scale + self.alpha = alpha + self.length_scale_bounds = length_scale_bounds + self.alpha_bounds = alpha_bounds + + @property + def hyperparameter_length_scale(self): + return Hyperparameter("length_scale", "numeric", self.length_scale_bounds) + + @property + def hyperparameter_alpha(self): + return Hyperparameter("alpha", "numeric", self.alpha_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims) + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when eval_gradient + is True. 
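+
+        Examples
+        --------
+        As an illustrative sketch: for large ``alpha`` the scale mixture
+        collapses and the kernel should approach the RBF kernel with the
+        same length scale:
+
+        >>> import numpy as np
+        >>> from sklearn.gaussian_process.kernels import RationalQuadratic, RBF
+        >>> X = np.random.RandomState(0).randn(5, 2)
+        >>> rq = RationalQuadratic(alpha=1e8)
+        >>> bool(np.allclose(rq(X), RBF()(X), atol=1e-6))
+        True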
+ """ + if len(np.atleast_1d(self.length_scale)) > 1: + raise AttributeError( + "RationalQuadratic kernel only supports isotropic version, " + "please use a single scalar for length_scale" + ) + X = np.atleast_2d(X) + if Y is None: + dists = squareform(pdist(X, metric="sqeuclidean")) + tmp = dists / (2 * self.alpha * self.length_scale**2) + base = 1 + tmp + K = base**-self.alpha + np.fill_diagonal(K, 1) + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + dists = cdist(X, Y, metric="sqeuclidean") + K = (1 + dists / (2 * self.alpha * self.length_scale**2)) ** -self.alpha + + if eval_gradient: + # gradient with respect to length_scale + if not self.hyperparameter_length_scale.fixed: + length_scale_gradient = dists * K / (self.length_scale**2 * base) + length_scale_gradient = length_scale_gradient[:, :, np.newaxis] + else: # l is kept fixed + length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0)) + + # gradient with respect to alpha + if not self.hyperparameter_alpha.fixed: + alpha_gradient = K * ( + -self.alpha * np.log(base) + + dists / (2 * self.length_scale**2 * base) + ) + alpha_gradient = alpha_gradient[:, :, np.newaxis] + else: # alpha is kept fixed + alpha_gradient = np.empty((K.shape[0], K.shape[1], 0)) + + return K, np.dstack((alpha_gradient, length_scale_gradient)) + else: + return K + + def __repr__(self): + return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format( + self.__class__.__name__, self.alpha, self.length_scale + ) + + +class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel): + r"""Exp-Sine-Squared kernel (aka periodic kernel). + + The ExpSineSquared kernel allows one to model functions which repeat + themselves exactly. It is parameterized by a length scale + parameter :math:`l>0` and a periodicity parameter :math:`p>0`. + Only the isotropic variant where :math:`l` is a scalar is + supported at the moment. The kernel is given by: + + .. math:: + k(x_i, x_j) = \text{exp}\left(- + \frac{ 2\sin^2(\pi d(x_i, x_j)/p) }{ l^ 2} \right) + + where :math:`l` is the length scale of the kernel, :math:`p` the + periodicity of the kernel and :math:`d(\cdot,\cdot)` is the + Euclidean distance. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + + length_scale : float > 0, default=1.0 + The length scale of the kernel. + + periodicity : float > 0, default=1.0 + The periodicity of the kernel. + + length_scale_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'length_scale'. + If set to "fixed", 'length_scale' cannot be changed during + hyperparameter tuning. + + periodicity_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'periodicity'. + If set to "fixed", 'periodicity' cannot be changed during + hyperparameter tuning. + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import ExpSineSquared + >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0) + >>> kernel = ExpSineSquared(length_scale=1, periodicity=1) + >>> gpr = GaussianProcessRegressor(kernel=kernel, alpha=5, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.0144... 
+ >>> gpr.predict(X[:2,:], return_std=True) + (array([425.6..., 457.5...]), array([0.3894..., 0.3467...])) + """ + + def __init__( + self, + length_scale=1.0, + periodicity=1.0, + length_scale_bounds=(1e-5, 1e5), + periodicity_bounds=(1e-5, 1e5), + ): + self.length_scale = length_scale + self.periodicity = periodicity + self.length_scale_bounds = length_scale_bounds + self.periodicity_bounds = periodicity_bounds + + @property + def hyperparameter_length_scale(self): + """Returns the length scale""" + return Hyperparameter("length_scale", "numeric", self.length_scale_bounds) + + @property + def hyperparameter_periodicity(self): + return Hyperparameter("periodicity", "numeric", self.periodicity_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + X = np.atleast_2d(X) + if Y is None: + dists = squareform(pdist(X, metric="euclidean")) + arg = np.pi * dists / self.periodicity + sin_of_arg = np.sin(arg) + K = np.exp(-2 * (sin_of_arg / self.length_scale) ** 2) + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + dists = cdist(X, Y, metric="euclidean") + K = np.exp( + -2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2 + ) + + if eval_gradient: + cos_of_arg = np.cos(arg) + # gradient with respect to length_scale + if not self.hyperparameter_length_scale.fixed: + length_scale_gradient = 4 / self.length_scale**2 * sin_of_arg**2 * K + length_scale_gradient = length_scale_gradient[:, :, np.newaxis] + else: # length_scale is kept fixed + length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0)) + # gradient with respect to p + if not self.hyperparameter_periodicity.fixed: + periodicity_gradient = ( + 4 * arg / self.length_scale**2 * cos_of_arg * sin_of_arg * K + ) + periodicity_gradient = periodicity_gradient[:, :, np.newaxis] + else: # p is kept fixed + periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0)) + + return K, np.dstack((length_scale_gradient, periodicity_gradient)) + else: + return K + + def __repr__(self): + return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format( + self.__class__.__name__, self.length_scale, self.periodicity + ) + + +class DotProduct(Kernel): + r"""Dot-Product kernel. + + The DotProduct kernel is non-stationary and can be obtained from linear + regression by putting :math:`N(0, 1)` priors on the coefficients + of :math:`x_d (d = 1, . . . , D)` and a prior of :math:`N(0, \sigma_0^2)` + on the bias. The DotProduct kernel is invariant to a rotation of + the coordinates about the origin, but not translations. + It is parameterized by a parameter sigma_0 :math:`\sigma` + which controls the inhomogenity of the kernel. 
For :math:`\sigma_0^2 =0`, + the kernel is called the homogeneous linear kernel, otherwise + it is inhomogeneous. The kernel is given by + + .. math:: + k(x_i, x_j) = \sigma_0 ^ 2 + x_i \cdot x_j + + The DotProduct kernel is commonly combined with exponentiation. + + See [1]_, Chapter 4, Section 4.2, for further details regarding the + DotProduct kernel. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + sigma_0 : float >= 0, default=1.0 + Parameter controlling the inhomogenity of the kernel. If sigma_0=0, + the kernel is homogeneous. + + sigma_0_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'sigma_0'. + If set to "fixed", 'sigma_0' cannot be changed during + hyperparameter tuning. + + References + ---------- + .. [1] `Carl Edward Rasmussen, Christopher K. I. Williams (2006). + "Gaussian Processes for Machine Learning". The MIT Press. + `_ + + Examples + -------- + >>> from sklearn.datasets import make_friedman2 + >>> from sklearn.gaussian_process import GaussianProcessRegressor + >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel + >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) + >>> kernel = DotProduct() + WhiteKernel() + >>> gpr = GaussianProcessRegressor(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpr.score(X, y) + 0.3680... + >>> gpr.predict(X[:2,:], return_std=True) + (array([653.0..., 592.1...]), array([316.6..., 316.6...])) + """ + + def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)): + self.sigma_0 = sigma_0 + self.sigma_0_bounds = sigma_0_bounds + + @property + def hyperparameter_sigma_0(self): + return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + X = np.atleast_2d(X) + if Y is None: + K = np.inner(X, X) + self.sigma_0**2 + else: + if eval_gradient: + raise ValueError("Gradient can only be evaluated when Y is None.") + K = np.inner(X, Y) + self.sigma_0**2 + + if eval_gradient: + if not self.hyperparameter_sigma_0.fixed: + K_gradient = np.empty((K.shape[0], K.shape[1], 1)) + K_gradient[..., 0] = 2 * self.sigma_0**2 + return K, K_gradient + else: + return K, np.empty((X.shape[0], X.shape[0], 0)) + else: + return K + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y). 
+ + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X). + """ + return np.einsum("ij,ij->i", X, X) + self.sigma_0**2 + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return False + + def __repr__(self): + return "{0}(sigma_0={1:.3g})".format(self.__class__.__name__, self.sigma_0) + + +# adapted from scipy/optimize/optimize.py for functions with 2d output +def _approx_fprime(xk, f, epsilon, args=()): + f0 = f(*((xk,) + args)) + grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float) + ei = np.zeros((len(xk),), float) + for k in range(len(xk)): + ei[k] = 1.0 + d = epsilon * ei + grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k] + ei[k] = 0.0 + return grad + + +class PairwiseKernel(Kernel): + """Wrapper for kernels in sklearn.metrics.pairwise. + + A thin wrapper around the functionality of the kernels in + sklearn.metrics.pairwise. + + Note: Evaluation of eval_gradient is not analytic but numeric and all + kernels support only isotropic distances. The parameter gamma is + considered to be a hyperparameter and may be optimized. The other + kernel parameters are set directly at initialization and are kept + fixed. + + .. versionadded:: 0.18 + + Parameters + ---------- + gamma : float, default=1.0 + Parameter gamma of the pairwise kernel specified by metric. It should + be positive. + + gamma_bounds : pair of floats >= 0 or "fixed", default=(1e-5, 1e5) + The lower and upper bound on 'gamma'. + If set to "fixed", 'gamma' cannot be changed during + hyperparameter tuning. + + metric : {"linear", "additive_chi2", "chi2", "poly", "polynomial", \ + "rbf", "laplacian", "sigmoid", "cosine"} or callable, \ + default="linear" + The metric to use when calculating kernel between instances in a + feature array. If metric is a string, it must be one of the metrics + in pairwise.PAIRWISE_KERNEL_FUNCTIONS. + If metric is "precomputed", X is assumed to be a kernel matrix. + Alternatively, if metric is a callable function, it is called on each + pair of instances (rows) and the resulting value recorded. The callable + should take two arrays from X as input and return a value indicating + the distance between them. + + pairwise_kernels_kwargs : dict, default=None + All entries of this dict (if any) are passed as keyword arguments to + the pairwise kernel function. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.gaussian_process import GaussianProcessClassifier + >>> from sklearn.gaussian_process.kernels import PairwiseKernel + >>> X, y = load_iris(return_X_y=True) + >>> kernel = PairwiseKernel(metric='rbf') + >>> gpc = GaussianProcessClassifier(kernel=kernel, + ... random_state=0).fit(X, y) + >>> gpc.score(X, y) + 0.9733... + >>> gpc.predict_proba(X[:2,:]) + array([[0.8880..., 0.05663..., 0.05532...], + [0.8676..., 0.07073..., 0.06165...]]) + """ + + def __init__( + self, + gamma=1.0, + gamma_bounds=(1e-5, 1e5), + metric="linear", + pairwise_kernels_kwargs=None, + ): + self.gamma = gamma + self.gamma_bounds = gamma_bounds + self.metric = metric + self.pairwise_kernels_kwargs = pairwise_kernels_kwargs + + @property + def hyperparameter_gamma(self): + return Hyperparameter("gamma", "numeric", self.gamma_bounds) + + def __call__(self, X, Y=None, eval_gradient=False): + """Return the kernel k(X, Y) and optionally its gradient. 
+ + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Y : ndarray of shape (n_samples_Y, n_features), default=None + Right argument of the returned kernel k(X, Y). If None, k(X, X) + if evaluated instead. + + eval_gradient : bool, default=False + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is None. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + """ + pairwise_kernels_kwargs = self.pairwise_kernels_kwargs + if self.pairwise_kernels_kwargs is None: + pairwise_kernels_kwargs = {} + + X = np.atleast_2d(X) + K = pairwise_kernels( + X, + Y, + metric=self.metric, + gamma=self.gamma, + filter_params=True, + **pairwise_kernels_kwargs, + ) + if eval_gradient: + if self.hyperparameter_gamma.fixed: + return K, np.empty((X.shape[0], X.shape[0], 0)) + else: + # approximate gradient numerically + def f(gamma): # helper function + return pairwise_kernels( + X, + Y, + metric=self.metric, + gamma=np.exp(gamma), + filter_params=True, + **pairwise_kernels_kwargs, + ) + + return K, _approx_fprime(self.theta, f, 1e-10) + else: + return K + + def diag(self, X): + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + # We have to fall back to slow way of computing diagonal + return np.apply_along_axis(self, 1, X).ravel() + + def is_stationary(self): + """Returns whether the kernel is stationary.""" + return self.metric in ["rbf"] + + def __repr__(self): + return "{0}(gamma={1}, metric={2})".format( + self.__class__.__name__, self.gamma, self.metric + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py new file mode 100644 index 0000000000000000000000000000000000000000..f49ed71231ad9db5891a0417f99ec5dd4fbf6f51 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/gaussian_process/tests/test_gpr.py @@ -0,0 +1,848 @@ +"""Testing for Gaussian process regression""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import re +import sys +import warnings + +import numpy as np +import pytest +from scipy.optimize import approx_fprime + +from sklearn.exceptions import ConvergenceWarning +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import ( + RBF, + DotProduct, + ExpSineSquared, + WhiteKernel, +) +from sklearn.gaussian_process.kernels import ( + ConstantKernel as C, +) +from 
sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_less, +) + + +def f(x): + return x * np.sin(x) + + +X = np.atleast_2d([1.0, 3.0, 5.0, 6.0, 7.0, 8.0]).T +X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T +y = f(X).ravel() + +fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed") +kernels = [ + RBF(length_scale=1.0), + fixed_kernel, + RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), + C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), + C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) + + C(1e-5, (1e-5, 1e2)), + C(0.1, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) + + C(1e-5, (1e-5, 1e2)), +] +non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel] + + +@pytest.mark.parametrize("kernel", kernels) +def test_gpr_interpolation(kernel): + if sys.maxsize <= 2**32: + pytest.xfail("This test may fail on 32 bit Python") + + # Test the interpolating property for different kernels. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + y_pred, y_cov = gpr.predict(X, return_cov=True) + + assert_almost_equal(y_pred, y) + assert_almost_equal(np.diag(y_cov), 0.0) + + +def test_gpr_interpolation_structured(): + # Test the interpolating property for different kernels. + kernel = MiniSeqKernel(baseline_similarity_bounds="fixed") + X = ["A", "B", "C"] + y = np.array([1, 2, 3]) + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + y_pred, y_cov = gpr.predict(X, return_cov=True) + + assert_almost_equal( + kernel(X, eval_gradient=True)[1].ravel(), (1 - np.eye(len(X))).ravel() + ) + assert_almost_equal(y_pred, y) + assert_almost_equal(np.diag(y_cov), 0.0) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_lml_improving(kernel): + if sys.maxsize <= 2**32: + pytest.xfail("This test may fail on 32 bit Python") + + # Test that hyperparameter-tuning improves log-marginal likelihood. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood( + kernel.theta + ) + + +@pytest.mark.parametrize("kernel", kernels) +def test_lml_precomputed(kernel): + # Test that lml of optimized kernel is stored correctly. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + assert gpr.log_marginal_likelihood(gpr.kernel_.theta) == pytest.approx( + gpr.log_marginal_likelihood() + ) + + +@pytest.mark.parametrize("kernel", kernels) +def test_lml_without_cloning_kernel(kernel): + # Test that lml of optimized kernel is stored correctly. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + input_theta = np.ones(gpr.kernel_.theta.shape, dtype=np.float64) + + gpr.log_marginal_likelihood(input_theta, clone_kernel=False) + assert_almost_equal(gpr.kernel_.theta, input_theta, 7) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_converged_to_local_maximum(kernel): + # Test that we are in local maximum after hyperparameter-optimization. 
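+    # At an interior local maximum the gradient of the log-marginal
+    # likelihood with respect to theta should vanish (up to a small
+    # tolerance); a non-zero gradient is only acceptable when the optimizer
+    # stopped at one of the bounds, hence the element-wise disjunction below.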
+ gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + + lml, lml_gradient = gpr.log_marginal_likelihood(gpr.kernel_.theta, True) + + assert np.all( + (np.abs(lml_gradient) < 1e-4) + | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) + | (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1]) + ) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_solution_inside_bounds(kernel): + # Test that hyperparameter-optimization remains in bounds# + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + + bounds = gpr.kernel_.bounds + max_ = np.finfo(gpr.kernel_.theta.dtype).max + tiny = 1e-10 + bounds[~np.isfinite(bounds[:, 1]), 1] = max_ + + assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny) + assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny) + + +@pytest.mark.parametrize("kernel", kernels) +def test_lml_gradient(kernel): + # Compare analytic and numeric gradient of log marginal likelihood. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + + lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True) + lml_gradient_approx = approx_fprime( + kernel.theta, lambda theta: gpr.log_marginal_likelihood(theta, False), 1e-10 + ) + + assert_almost_equal(lml_gradient, lml_gradient_approx, 3) + + +@pytest.mark.parametrize("kernel", kernels) +def test_prior(kernel): + # Test that GP prior has mean 0 and identical variances. + gpr = GaussianProcessRegressor(kernel=kernel) + + y_mean, y_cov = gpr.predict(X, return_cov=True) + + assert_almost_equal(y_mean, 0, 5) + if len(gpr.kernel.theta) > 1: + # XXX: quite hacky, works only for current kernels + assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5) + else: + assert_almost_equal(np.diag(y_cov), 1, 5) + + +@pytest.mark.parametrize("kernel", kernels) +def test_sample_statistics(kernel): + # Test that statistics of samples drawn from GP are correct. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + + y_mean, y_cov = gpr.predict(X2, return_cov=True) + + samples = gpr.sample_y(X2, 300000) + + # More digits accuracy would require many more samples + assert_almost_equal(y_mean, np.mean(samples, 1), 1) + assert_almost_equal( + np.diag(y_cov) / np.diag(y_cov).max(), + np.var(samples, 1) / np.diag(y_cov).max(), + 1, + ) + + +def test_no_optimizer(): + # Test that kernel parameters are unmodified when optimizer is None. + kernel = RBF(1.0) + gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y) + assert np.exp(gpr.kernel_.theta) == 1.0 + + +@pytest.mark.parametrize("kernel", kernels) +@pytest.mark.parametrize("target", [y, np.ones(X.shape[0], dtype=np.float64)]) +def test_predict_cov_vs_std(kernel, target): + if sys.maxsize <= 2**32: + pytest.xfail("This test may fail on 32 bit Python") + + # Test that predicted std.-dev. is consistent with cov's diagonal. + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + y_mean, y_cov = gpr.predict(X2, return_cov=True) + y_mean, y_std = gpr.predict(X2, return_std=True) + assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std) + + +def test_anisotropic_kernel(): + # Test that GPR can identify meaningful anisotropic length-scales. + # We learn a function which varies in one dimension ten-times slower + # than in the other. 
The corresponding length-scales should differ by at + # least a factor 5 + rng = np.random.RandomState(0) + X = rng.uniform(-1, 1, (50, 2)) + y = X[:, 0] + 0.1 * X[:, 1] + + kernel = RBF([1.0, 1.0]) + gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y) + assert np.exp(gpr.kernel_.theta[1]) > np.exp(gpr.kernel_.theta[0]) * 5 + + +def test_random_starts(): + # Test that an increasing number of random-starts of GP fitting only + # increases the log marginal likelihood of the chosen theta. + n_samples, n_features = 25, 2 + rng = np.random.RandomState(0) + X = rng.randn(n_samples, n_features) * 2 - 1 + y = ( + np.sin(X).sum(axis=1) + + np.sin(3 * X).sum(axis=1) + + rng.normal(scale=0.1, size=n_samples) + ) + + kernel = C(1.0, (1e-2, 1e2)) * RBF( + length_scale=[1.0] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features + ) + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1)) + last_lml = -np.inf + for n_restarts_optimizer in range(5): + gp = GaussianProcessRegressor( + kernel=kernel, + n_restarts_optimizer=n_restarts_optimizer, + random_state=0, + ).fit(X, y) + lml = gp.log_marginal_likelihood(gp.kernel_.theta) + assert lml > last_lml - np.finfo(np.float32).eps + last_lml = lml + + +@pytest.mark.parametrize("kernel", kernels) +def test_y_normalization(kernel): + """ + Test normalization of the target values in GP + + Fitting non-normalizing GP on normalized y and fitting normalizing GP + on unnormalized y should yield identical results. Note that, here, + 'normalized y' refers to y that has been made zero mean and unit + variance. + + """ + + y_mean = np.mean(y) + y_std = np.std(y) + y_norm = (y - y_mean) / y_std + + # Fit non-normalizing GP on normalized y + gpr = GaussianProcessRegressor(kernel=kernel) + gpr.fit(X, y_norm) + + # Fit normalizing GP on unnormalized y + gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr_norm.fit(X, y) + + # Compare predicted mean, std-devs and covariances + y_pred, y_pred_std = gpr.predict(X2, return_std=True) + y_pred = y_pred * y_std + y_mean + y_pred_std = y_pred_std * y_std + y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True) + + assert_almost_equal(y_pred, y_pred_norm) + assert_almost_equal(y_pred_std, y_pred_std_norm) + + _, y_cov = gpr.predict(X2, return_cov=True) + y_cov = y_cov * y_std**2 + _, y_cov_norm = gpr_norm.predict(X2, return_cov=True) + + assert_almost_equal(y_cov, y_cov_norm) + + +def test_large_variance_y(): + """ + Here we test that, when noramlize_y=True, our GP can produce a + sensible fit to training data whose variance is significantly + larger than unity. This test was made in response to issue #15612. + + GP predictions are verified against predictions that were made + using GPy which, here, is treated as the 'gold standard'. Note that we + only investigate the RBF kernel here, as that is what was used in the + GPy implementation. + + The following code can be used to recreate the GPy data: + + -------------------------------------------------------------------------- + import GPy + + kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.) 
+ gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy) + gpy.optimize() + y_pred_gpy, y_var_gpy = gpy.predict(X2) + y_pred_std_gpy = np.sqrt(y_var_gpy) + -------------------------------------------------------------------------- + """ + + # Here we utilise a larger variance version of the training data + y_large = 10 * y + + # Standard GP with normalize_y=True + RBF_params = {"length_scale": 1.0} + kernel = RBF(**RBF_params) + gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr.fit(X, y_large) + y_pred, y_pred_std = gpr.predict(X2, return_std=True) + + # 'Gold standard' mean predictions from GPy + y_pred_gpy = np.array( + [15.16918303, -27.98707845, -39.31636019, 14.52605515, 69.18503589] + ) + + # 'Gold standard' std predictions from GPy + y_pred_std_gpy = np.array( + [7.78860962, 3.83179178, 0.63149951, 0.52745188, 0.86170042] + ) + + # Based on numerical experiments, it's reasonable to expect our + # GP's mean predictions to get within 7% of predictions of those + # made by GPy. + assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0) + + # Based on numerical experiments, it's reasonable to expect our + # GP's std predictions to get within 15% of predictions of those + # made by GPy. + assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0) + + +def test_y_multioutput(): + # Test that GPR can deal with multi-dimensional target values + y_2d = np.vstack((y, y * 2)).T + + # Test for fixed kernel that first dimension of 2d GP equals the output + # of 1d GP and that second dimension is twice as large + kernel = RBF(length_scale=1.0) + + gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False) + gpr.fit(X, y) + + gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False) + gpr_2d.fit(X, y_2d) + + y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True) + y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True) + _, y_cov_1d = gpr.predict(X2, return_cov=True) + _, y_cov_2d = gpr_2d.predict(X2, return_cov=True) + + assert_almost_equal(y_pred_1d, y_pred_2d[:, 0]) + assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2) + + # Standard deviation and covariance do not depend on output + for target in range(y_2d.shape[1]): + assert_almost_equal(y_std_1d, y_std_2d[..., target]) + assert_almost_equal(y_cov_1d, y_cov_2d[..., target]) + + y_sample_1d = gpr.sample_y(X2, n_samples=10) + y_sample_2d = gpr_2d.sample_y(X2, n_samples=10) + + assert y_sample_1d.shape == (5, 10) + assert y_sample_2d.shape == (5, 2, 10) + # Only the first target will be equal + assert_almost_equal(y_sample_1d, y_sample_2d[:, 0, :]) + + # Test hyperparameter optimization + for kernel in kernels: + gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr.fit(X, y) + + gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr_2d.fit(X, np.vstack((y, y)).T) + + assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4) + + +@pytest.mark.parametrize("kernel", non_fixed_kernels) +def test_custom_optimizer(kernel): + # Test that GPR can use externally defined optimizers. 
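+    # A custom optimizer is a callable with the signature
+    # optimizer(obj_func, initial_theta, bounds) that returns a tuple
+    # (theta_opt, func_min), where obj_func(theta, eval_gradient=False)
+    # evaluates the objective to be minimized (the negative log-marginal
+    # likelihood) at theta.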
+ # Define a dummy optimizer that simply tests 50 random hyperparameters + def optimizer(obj_func, initial_theta, bounds): + rng = np.random.RandomState(0) + theta_opt, func_min = initial_theta, obj_func( + initial_theta, eval_gradient=False + ) + for _ in range(50): + theta = np.atleast_1d( + rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1])) + ) + f = obj_func(theta, eval_gradient=False) + if f < func_min: + theta_opt, func_min = theta, f + return theta_opt, func_min + + gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer) + gpr.fit(X, y) + # Checks that optimizer improved marginal likelihood + assert gpr.log_marginal_likelihood(gpr.kernel_.theta) > gpr.log_marginal_likelihood( + gpr.kernel.theta + ) + + +def test_gpr_correct_error_message(): + X = np.arange(12).reshape(6, -1) + y = np.ones(6) + kernel = DotProduct() + gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0) + message = ( + "The kernel, %s, is not returning a " + "positive definite matrix. Try gradually increasing " + "the 'alpha' parameter of your " + "GaussianProcessRegressor estimator." % kernel + ) + with pytest.raises(np.linalg.LinAlgError, match=re.escape(message)): + gpr.fit(X, y) + + +@pytest.mark.parametrize("kernel", kernels) +def test_duplicate_input(kernel): + # Test GPR can handle two different output-values for the same input. + gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2) + gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2) + + X_ = np.vstack((X, X[0])) + y_ = np.hstack((y, y[0] + 1)) + gpr_equal_inputs.fit(X_, y_) + + X_ = np.vstack((X, X[0] + 1e-15)) + y_ = np.hstack((y, y[0] + 1)) + gpr_similar_inputs.fit(X_, y_) + + X_test = np.linspace(0, 10, 100)[:, None] + y_pred_equal, y_std_equal = gpr_equal_inputs.predict(X_test, return_std=True) + y_pred_similar, y_std_similar = gpr_similar_inputs.predict(X_test, return_std=True) + + assert_almost_equal(y_pred_equal, y_pred_similar) + assert_almost_equal(y_std_equal, y_std_similar) + + +def test_no_fit_default_predict(): + # Test that GPR predictions without fit does not break by default. + default_kernel = C(1.0, constant_value_bounds="fixed") * RBF( + 1.0, length_scale_bounds="fixed" + ) + gpr1 = GaussianProcessRegressor() + _, y_std1 = gpr1.predict(X, return_std=True) + _, y_cov1 = gpr1.predict(X, return_cov=True) + + gpr2 = GaussianProcessRegressor(kernel=default_kernel) + _, y_std2 = gpr2.predict(X, return_std=True) + _, y_cov2 = gpr2.predict(X, return_cov=True) + + assert_array_almost_equal(y_std1, y_std2) + assert_array_almost_equal(y_cov1, y_cov2) + + +def test_warning_bounds(): + kernel = RBF(length_scale_bounds=[1e-5, 1e-3]) + gpr = GaussianProcessRegressor(kernel=kernel) + warning_message = ( + "The optimal value found for dimension 0 of parameter " + "length_scale is close to the specified upper bound " + "0.001. Increasing the bound and calling fit again may " + "find a better value." 
+ ) + with pytest.warns(ConvergenceWarning, match=warning_message): + gpr.fit(X, y) + + kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF( + length_scale_bounds=[1e3, 1e5] + ) + gpr_sum = GaussianProcessRegressor(kernel=kernel_sum) + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + gpr_sum.fit(X, y) + + assert len(record) == 2 + + assert issubclass(record[0].category, ConvergenceWarning) + assert ( + record[0].message.args[0] == "The optimal value found for " + "dimension 0 of parameter " + "k1__noise_level is close to the " + "specified upper bound 0.001. " + "Increasing the bound and calling " + "fit again may find a better value." + ) + + assert issubclass(record[1].category, ConvergenceWarning) + assert ( + record[1].message.args[0] == "The optimal value found for " + "dimension 0 of parameter " + "k2__length_scale is close to the " + "specified lower bound 1000.0. " + "Decreasing the bound and calling " + "fit again may find a better value." + ) + + X_tile = np.tile(X, 2) + kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2]) + gpr_dims = GaussianProcessRegressor(kernel=kernel_dims) + + with warnings.catch_warnings(record=True) as record: + warnings.simplefilter("always") + gpr_dims.fit(X_tile, y) + + assert len(record) == 2 + + assert issubclass(record[0].category, ConvergenceWarning) + assert ( + record[0].message.args[0] == "The optimal value found for " + "dimension 0 of parameter " + "length_scale is close to the " + "specified lower bound 10.0. " + "Decreasing the bound and calling " + "fit again may find a better value." + ) + + assert issubclass(record[1].category, ConvergenceWarning) + assert ( + record[1].message.args[0] == "The optimal value found for " + "dimension 1 of parameter " + "length_scale is close to the " + "specified lower bound 10.0. " + "Decreasing the bound and calling " + "fit again may find a better value." + ) + + +def test_bound_check_fixed_hyperparameter(): + # Regression test for issue #17943 + # Check that having a hyperparameter with fixed bounds doesn't cause an + # error + k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend + k2 = ExpSineSquared( + length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed" + ) # seasonal component + kernel = k1 + k2 + GaussianProcessRegressor(kernel=kernel).fit(X, y) + + +@pytest.mark.parametrize("kernel", kernels) +def test_constant_target(kernel): + """Check that the std. dev. is affected to 1 when normalizing a constant + feature. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/18318 + NaN where affected to the target when scaling due to null std. dev. with + constant target. 
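+    In other words: a constant target has zero standard deviation, so with
+    normalize_y=True the stored _y_train_std is expected to fall back to 1
+    instead of dividing by zero and propagating NaN into the predictions.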
+ """ + y_constant = np.ones(X.shape[0], dtype=np.float64) + + gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True) + gpr.fit(X, y_constant) + assert gpr._y_train_std == pytest.approx(1.0) + + y_pred, y_cov = gpr.predict(X, return_cov=True) + assert_allclose(y_pred, y_constant) + # set atol because we compare to zero + assert_allclose(np.diag(y_cov), 0.0, atol=1e-9) + + # Test multi-target data + n_samples, n_targets = X.shape[0], 2 + rng = np.random.RandomState(0) + y = np.concatenate( + [ + rng.normal(size=(n_samples, 1)), # non-constant target + np.full(shape=(n_samples, 1), fill_value=2), # constant target + ], + axis=1, + ) + + gpr.fit(X, y) + Y_pred, Y_cov = gpr.predict(X, return_cov=True) + + assert_allclose(Y_pred[:, 1], 2) + assert_allclose(np.diag(Y_cov[..., 1]), 0.0, atol=1e-9) + + assert Y_pred.shape == (n_samples, n_targets) + assert Y_cov.shape == (n_samples, n_samples, n_targets) + + +def test_gpr_consistency_std_cov_non_invertible_kernel(): + """Check the consistency between the returned std. dev. and the covariance. + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/19936 + Inconsistencies were observed when the kernel cannot be inverted (or + numerically stable). + """ + kernel = C(8.98576054e05, (1e-12, 1e12)) * RBF( + [5.91326520e02, 1.32584051e03], (1e-12, 1e12) + ) + WhiteKernel(noise_level=1e-5) + gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None) + X_train = np.array( + [ + [0.0, 0.0], + [1.54919334, -0.77459667], + [-1.54919334, 0.0], + [0.0, -1.54919334], + [0.77459667, 0.77459667], + [-0.77459667, 1.54919334], + ] + ) + y_train = np.array( + [ + [-2.14882017e-10], + [-4.66975823e00], + [4.01823986e00], + [-1.30303674e00], + [-1.35760156e00], + [3.31215668e00], + ] + ) + gpr.fit(X_train, y_train) + X_test = np.array( + [ + [-1.93649167, -1.93649167], + [1.93649167, -1.93649167], + [-1.93649167, 1.93649167], + [1.93649167, 1.93649167], + ] + ) + pred1, std = gpr.predict(X_test, return_std=True) + pred2, cov = gpr.predict(X_test, return_cov=True) + assert_allclose(std, np.sqrt(np.diagonal(cov)), rtol=1e-5) + + +@pytest.mark.parametrize( + "params, TypeError, err_msg", + [ + ( + {"alpha": np.zeros(100)}, + ValueError, + "alpha must be a scalar or an array with same number of entries as y", + ), + ( + { + "kernel": WhiteKernel(noise_level_bounds=(-np.inf, np.inf)), + "n_restarts_optimizer": 2, + }, + ValueError, + "requires that all bounds are finite", + ), + ], +) +def test_gpr_fit_error(params, TypeError, err_msg): + """Check that expected error are raised during fit.""" + gpr = GaussianProcessRegressor(**params) + with pytest.raises(TypeError, match=err_msg): + gpr.fit(X, y) + + +def test_gpr_lml_error(): + """Check that we raise the proper error in the LML method.""" + gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y) + + err_msg = "Gradient can only be evaluated for theta!=None" + with pytest.raises(ValueError, match=err_msg): + gpr.log_marginal_likelihood(eval_gradient=True) + + +def test_gpr_predict_error(): + """Check that we raise the proper error during predict.""" + gpr = GaussianProcessRegressor(kernel=RBF()).fit(X, y) + + err_msg = "At most one of return_std or return_cov can be requested." 
+ with pytest.raises(RuntimeError, match=err_msg): + gpr.predict(X, return_cov=True, return_std=True) + + +@pytest.mark.parametrize("normalize_y", [True, False]) +@pytest.mark.parametrize("n_targets", [None, 1, 10]) +def test_predict_shapes(normalize_y, n_targets): + """Check the shapes of y_mean, y_std, and y_cov in single-output + (n_targets=None) and multi-output settings, including the edge case when + n_targets=1, where the sklearn convention is to squeeze the predictions. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/17394 + https://github.com/scikit-learn/scikit-learn/issues/18065 + https://github.com/scikit-learn/scikit-learn/issues/22174 + """ + rng = np.random.RandomState(1234) + + n_features, n_samples_train, n_samples_test = 6, 9, 7 + + y_train_shape = (n_samples_train,) + if n_targets is not None: + y_train_shape = y_train_shape + (n_targets,) + + # By convention single-output data is squeezed upon prediction + y_test_shape = (n_samples_test,) + if n_targets is not None and n_targets > 1: + y_test_shape = y_test_shape + (n_targets,) + + X_train = rng.randn(n_samples_train, n_features) + X_test = rng.randn(n_samples_test, n_features) + y_train = rng.randn(*y_train_shape) + + model = GaussianProcessRegressor(normalize_y=normalize_y) + model.fit(X_train, y_train) + + y_pred, y_std = model.predict(X_test, return_std=True) + _, y_cov = model.predict(X_test, return_cov=True) + + assert y_pred.shape == y_test_shape + assert y_std.shape == y_test_shape + assert y_cov.shape == (n_samples_test,) + y_test_shape + + +@pytest.mark.parametrize("normalize_y", [True, False]) +@pytest.mark.parametrize("n_targets", [None, 1, 10]) +def test_sample_y_shapes(normalize_y, n_targets): + """Check the shapes of y_samples in single-output (n_targets=0) and + multi-output settings, including the edge case when n_targets=1, where the + sklearn convention is to squeeze the predictions. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/22175 + """ + rng = np.random.RandomState(1234) + + n_features, n_samples_train = 6, 9 + # Number of spatial locations to predict at + n_samples_X_test = 7 + # Number of sample predictions per test point + n_samples_y_test = 5 + + y_train_shape = (n_samples_train,) + if n_targets is not None: + y_train_shape = y_train_shape + (n_targets,) + + # By convention single-output data is squeezed upon prediction + if n_targets is not None and n_targets > 1: + y_test_shape = (n_samples_X_test, n_targets, n_samples_y_test) + else: + y_test_shape = (n_samples_X_test, n_samples_y_test) + + X_train = rng.randn(n_samples_train, n_features) + X_test = rng.randn(n_samples_X_test, n_features) + y_train = rng.randn(*y_train_shape) + + model = GaussianProcessRegressor(normalize_y=normalize_y) + + # FIXME: before fitting, the estimator does not have information regarding + # the number of targets and default to 1. This is inconsistent with the shape + # provided after `fit`. 
This assert should be made once the following issue + # is fixed: + # https://github.com/scikit-learn/scikit-learn/issues/22430 + # y_samples = model.sample_y(X_test, n_samples=n_samples_y_test) + # assert y_samples.shape == y_test_shape + + model.fit(X_train, y_train) + + y_samples = model.sample_y(X_test, n_samples=n_samples_y_test) + assert y_samples.shape == y_test_shape + + +@pytest.mark.parametrize("n_targets", [None, 1, 2, 3]) +@pytest.mark.parametrize("n_samples", [1, 5]) +def test_sample_y_shape_with_prior(n_targets, n_samples): + """Check the output shape of `sample_y` is consistent before and after `fit`.""" + rng = np.random.RandomState(1024) + + X = rng.randn(10, 3) + y = rng.randn(10, n_targets if n_targets is not None else 1) + + model = GaussianProcessRegressor(n_targets=n_targets) + shape_before_fit = model.sample_y(X, n_samples=n_samples).shape + model.fit(X, y) + shape_after_fit = model.sample_y(X, n_samples=n_samples).shape + assert shape_before_fit == shape_after_fit + + +@pytest.mark.parametrize("n_targets", [None, 1, 2, 3]) +def test_predict_shape_with_prior(n_targets): + """Check the output shape of `predict` with prior distribution.""" + rng = np.random.RandomState(1024) + + n_sample = 10 + X = rng.randn(n_sample, 3) + y = rng.randn(n_sample, n_targets if n_targets is not None else 1) + + model = GaussianProcessRegressor(n_targets=n_targets) + mean_prior, cov_prior = model.predict(X, return_cov=True) + _, std_prior = model.predict(X, return_std=True) + + model.fit(X, y) + mean_post, cov_post = model.predict(X, return_cov=True) + _, std_post = model.predict(X, return_std=True) + + assert mean_prior.shape == mean_post.shape + assert cov_prior.shape == cov_post.shape + assert std_prior.shape == std_post.shape + + +def test_n_targets_error(): + """Check that an error is raised when the number of targets seen at fit is + inconsistent with n_targets. + """ + rng = np.random.RandomState(0) + X = rng.randn(10, 3) + y = rng.randn(10, 2) + + model = GaussianProcessRegressor(n_targets=1) + with pytest.raises(ValueError, match="The number of targets seen in `y`"): + model.fit(X, y) + + +class CustomKernel(C): + """ + A custom kernel that has a diag method that returns the first column of the + input matrix X. This is a helper for the test to check that the input + matrix X is not mutated. + """ + + def diag(self, X): + return X[:, 0] + + +def test_gpr_predict_input_not_modified(): + """ + Check that the input X is not modified by the predict method of the + GaussianProcessRegressor when setting return_std=True. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/24340 + """ + gpr = GaussianProcessRegressor(kernel=CustomKernel()).fit(X, y) + + X2_copy = np.copy(X2) + _, _ = gpr.predict(X2, return_std=True) + + assert_allclose(X2, X2_copy) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6832f110e4cc66097d1cf869456538d5fa460df4 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__init__.py @@ -0,0 +1,9 @@ +"""Mixture modeling algorithms.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from ._bayesian_mixture import BayesianGaussianMixture +from ._gaussian_mixture import GaussianMixture + +__all__ = ["GaussianMixture", "BayesianGaussianMixture"] diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d90cd47553aa3ebbfd41581b5ce512f7baeb77f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_base.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a998c9c406981474bc77df175e3567ee01fa24f5 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_base.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_bayesian_mixture.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_bayesian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41170b6bea47155b0e19c745d3714200438bba33 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_bayesian_mixture.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_gaussian_mixture.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_gaussian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ee5c45876c55ee0b751e1daa7583ae14395b11e Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/__pycache__/_gaussian_mixture.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/_base.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..ebb069a1ff56300c50fd9c5aa0353dc7ef5f4a0f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/_base.py @@ -0,0 +1,564 @@ +"""Base class for mixture models.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real +from time import time + +import numpy as np +from scipy.special import logsumexp + +from .. 
import cluster +from ..base import BaseEstimator, DensityMixin, _fit_context +from ..cluster import kmeans_plusplus +from ..exceptions import ConvergenceWarning +from ..utils import check_random_state +from ..utils._param_validation import Interval, StrOptions +from ..utils.validation import check_is_fitted, validate_data + + +def _check_shape(param, param_shape, name): + """Validate the shape of the input parameter 'param'. + + Parameters + ---------- + param : array + + param_shape : tuple + + name : str + """ + param = np.array(param) + if param.shape != param_shape: + raise ValueError( + "The parameter '%s' should have the shape of %s, but got %s" + % (name, param_shape, param.shape) + ) + + +class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for mixture models. + + This abstract class specifies an interface for all mixture classes and + provides basic common methods for mixture models. + """ + + _parameter_constraints: dict = { + "n_components": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0.0, None, closed="left")], + "reg_covar": [Interval(Real, 0.0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "n_init": [Interval(Integral, 1, None, closed="left")], + "init_params": [ + StrOptions({"kmeans", "random", "random_from_data", "k-means++"}) + ], + "random_state": ["random_state"], + "warm_start": ["boolean"], + "verbose": ["verbose"], + "verbose_interval": [Interval(Integral, 1, None, closed="left")], + } + + def __init__( + self, + n_components, + tol, + reg_covar, + max_iter, + n_init, + init_params, + random_state, + warm_start, + verbose, + verbose_interval, + ): + self.n_components = n_components + self.tol = tol + self.reg_covar = reg_covar + self.max_iter = max_iter + self.n_init = n_init + self.init_params = init_params + self.random_state = random_state + self.warm_start = warm_start + self.verbose = verbose + self.verbose_interval = verbose_interval + + @abstractmethod + def _check_parameters(self, X): + """Check initial parameters of the derived class. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + pass + + def _initialize_parameters(self, X, random_state): + """Initialize the model parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + random_state : RandomState + A random number generator instance that controls the random seed + used for the method chosen to initialize the parameters. 
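+
+        Notes
+        -----
+        The responsibility matrix ``resp`` of shape (n_samples, n_components)
+        is built according to ``init_params``: hard assignments from a KMeans
+        fit ("kmeans"), row-normalized uniform random values ("random"),
+        one-hot rows for randomly chosen training points ("random_from_data"),
+        or one-hot rows for the k-means++ seeds ("k-means++"). The result is
+        then passed to ``self._initialize(X, resp)``.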
+ """ + n_samples, _ = X.shape + + if self.init_params == "kmeans": + resp = np.zeros((n_samples, self.n_components)) + label = ( + cluster.KMeans( + n_clusters=self.n_components, n_init=1, random_state=random_state + ) + .fit(X) + .labels_ + ) + resp[np.arange(n_samples), label] = 1 + elif self.init_params == "random": + resp = random_state.uniform(size=(n_samples, self.n_components)) + resp /= resp.sum(axis=1)[:, np.newaxis] + elif self.init_params == "random_from_data": + resp = np.zeros((n_samples, self.n_components)) + indices = random_state.choice( + n_samples, size=self.n_components, replace=False + ) + resp[indices, np.arange(self.n_components)] = 1 + elif self.init_params == "k-means++": + resp = np.zeros((n_samples, self.n_components)) + _, indices = kmeans_plusplus( + X, + self.n_components, + random_state=random_state, + ) + resp[indices, np.arange(self.n_components)] = 1 + + self._initialize(X, resp) + + @abstractmethod + def _initialize(self, X, resp): + """Initialize the model parameters of the derived class. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + resp : array-like of shape (n_samples, n_components) + """ + pass + + def fit(self, X, y=None): + """Estimate model parameters with the EM algorithm. + + The method fits the model ``n_init`` times and sets the parameters with + which the model has the largest likelihood or lower bound. Within each + trial, the method iterates between E-step and M-step for ``max_iter`` + times until the change of likelihood or lower bound is less than + ``tol``, otherwise, a ``ConvergenceWarning`` is raised. + If ``warm_start`` is ``True``, then ``n_init`` is ignored and a single + initialization is performed upon the first call. Upon consecutive + calls, training starts where it left off. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + self : object + The fitted mixture. + """ + # parameters are validated in fit_predict + self.fit_predict(X, y) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit_predict(self, X, y=None): + """Estimate model parameters using X and predict the labels for X. + + The method fits the model n_init times and sets the parameters with + which the model has the largest likelihood or lower bound. Within each + trial, the method iterates between E-step and M-step for `max_iter` + times until the change of likelihood or lower bound is less than + `tol`, otherwise, a :class:`~sklearn.exceptions.ConvergenceWarning` is + raised. After fitting, it predicts the most probable label for the + input data points. + + .. versionadded:: 0.20 + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + labels : array, shape (n_samples,) + Component labels. 
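+
+        Examples
+        --------
+        As an illustrative sketch using the concrete
+        :class:`~sklearn.mixture.GaussianMixture` subclass on two
+        well-separated blobs:
+
+        >>> import numpy as np
+        >>> from sklearn.mixture import GaussianMixture
+        >>> rng = np.random.RandomState(0)
+        >>> X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 10.0])
+        >>> gm = GaussianMixture(n_components=2, random_state=0)
+        >>> np.bincount(gm.fit_predict(X))
+        array([50, 50])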
+ """ + X = validate_data(self, X, dtype=[np.float64, np.float32], ensure_min_samples=2) + if X.shape[0] < self.n_components: + raise ValueError( + "Expected n_samples >= n_components " + f"but got n_components = {self.n_components}, " + f"n_samples = {X.shape[0]}" + ) + self._check_parameters(X) + + # if we enable warm_start, we will have a unique initialisation + do_init = not (self.warm_start and hasattr(self, "converged_")) + n_init = self.n_init if do_init else 1 + + max_lower_bound = -np.inf + self.converged_ = False + + random_state = check_random_state(self.random_state) + + n_samples, _ = X.shape + for init in range(n_init): + self._print_verbose_msg_init_beg(init) + + if do_init: + self._initialize_parameters(X, random_state) + + lower_bound = -np.inf if do_init else self.lower_bound_ + + if self.max_iter == 0: + best_params = self._get_parameters() + best_n_iter = 0 + else: + converged = False + for n_iter in range(1, self.max_iter + 1): + prev_lower_bound = lower_bound + + log_prob_norm, log_resp = self._e_step(X) + self._m_step(X, log_resp) + lower_bound = self._compute_lower_bound(log_resp, log_prob_norm) + + change = lower_bound - prev_lower_bound + self._print_verbose_msg_iter_end(n_iter, change) + + if abs(change) < self.tol: + converged = True + break + + self._print_verbose_msg_init_end(lower_bound, converged) + + if lower_bound > max_lower_bound or max_lower_bound == -np.inf: + max_lower_bound = lower_bound + best_params = self._get_parameters() + best_n_iter = n_iter + self.converged_ = converged + + # Should only warn about convergence if max_iter > 0, otherwise + # the user is assumed to have used 0-iters initialization + # to get the initial means. + if not self.converged_ and self.max_iter > 0: + warnings.warn( + ( + "Best performing initialization did not converge. " + "Try different init parameters, or increase max_iter, " + "tol, or check for degenerate data." + ), + ConvergenceWarning, + ) + + self._set_parameters(best_params) + self.n_iter_ = best_n_iter + self.lower_bound_ = max_lower_bound + + # Always do a final e-step to guarantee that the labels returned by + # fit_predict(X) are always consistent with fit(X).predict(X) + # for any value of max_iter and tol (and any random_state). + _, log_resp = self._e_step(X) + + return log_resp.argmax(axis=1) + + def _e_step(self, X): + """E step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + log_prob_norm : float + Mean of the logarithms of the probabilities of each sample in X + + log_responsibility : array, shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + """ + log_prob_norm, log_resp = self._estimate_log_prob_resp(X) + return np.mean(log_prob_norm), log_resp + + @abstractmethod + def _m_step(self, X, log_resp): + """M step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array-like of shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + """ + pass + + @abstractmethod + def _get_parameters(self): + pass + + @abstractmethod + def _set_parameters(self, params): + pass + + def score_samples(self, X): + """Compute the log-likelihood of each sample. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. 
+ + Returns + ------- + log_prob : array, shape (n_samples,) + Log-likelihood of each sample in `X` under the current model. + """ + check_is_fitted(self) + X = validate_data(self, X, reset=False) + + return logsumexp(self._estimate_weighted_log_prob(X), axis=1) + + def score(self, X, y=None): + """Compute the per-sample average log-likelihood of the given data X. + + Parameters + ---------- + X : array-like of shape (n_samples, n_dimensions) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + y : Ignored + Not used, present for API consistency by convention. + + Returns + ------- + log_likelihood : float + Log-likelihood of `X` under the Gaussian mixture model. + """ + return self.score_samples(X).mean() + + def predict(self, X): + """Predict the labels for the data samples in X using trained model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + Returns + ------- + labels : array, shape (n_samples,) + Component labels. + """ + check_is_fitted(self) + X = validate_data(self, X, reset=False) + return self._estimate_weighted_log_prob(X).argmax(axis=1) + + def predict_proba(self, X): + """Evaluate the components' density for each sample. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + List of n_features-dimensional data points. Each row + corresponds to a single data point. + + Returns + ------- + resp : array, shape (n_samples, n_components) + Density of each Gaussian component for each sample in X. + """ + check_is_fitted(self) + X = validate_data(self, X, reset=False) + _, log_resp = self._estimate_log_prob_resp(X) + return np.exp(log_resp) + + def sample(self, n_samples=1): + """Generate random samples from the fitted Gaussian distribution. + + Parameters + ---------- + n_samples : int, default=1 + Number of samples to generate. + + Returns + ------- + X : array, shape (n_samples, n_features) + Randomly generated sample. + + y : array, shape (nsamples,) + Component labels. + """ + check_is_fitted(self) + + if n_samples < 1: + raise ValueError( + "Invalid value for 'n_samples': %d . The sampling requires at " + "least one sample." % (self.n_components) + ) + + _, n_features = self.means_.shape + rng = check_random_state(self.random_state) + n_samples_comp = rng.multinomial(n_samples, self.weights_) + + if self.covariance_type == "full": + X = np.vstack( + [ + rng.multivariate_normal(mean, covariance, int(sample)) + for (mean, covariance, sample) in zip( + self.means_, self.covariances_, n_samples_comp + ) + ] + ) + elif self.covariance_type == "tied": + X = np.vstack( + [ + rng.multivariate_normal(mean, self.covariances_, int(sample)) + for (mean, sample) in zip(self.means_, n_samples_comp) + ] + ) + else: + X = np.vstack( + [ + mean + + rng.standard_normal(size=(sample, n_features)) + * np.sqrt(covariance) + for (mean, covariance, sample) in zip( + self.means_, self.covariances_, n_samples_comp + ) + ] + ) + + y = np.concatenate( + [np.full(sample, j, dtype=int) for j, sample in enumerate(n_samples_comp)] + ) + + return (X, y) + + def _estimate_weighted_log_prob(self, X): + """Estimate the weighted log-probabilities, log P(X | Z) + log weights. 
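# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# Relations implied by the methods around here: `score` is the mean of
# `score_samples`, `predict` is the argmax of `predict_proba`, and `sample`
# returns both the generated points and their component labels:
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(1)
X_demo = np.vstack([rng.randn(40, 2), rng.randn(40, 2) + 4])
gm = GaussianMixture(n_components=2, random_state=1).fit(X_demo)

assert np.isclose(gm.score(X_demo), gm.score_samples(X_demo).mean())
assert np.array_equal(gm.predict(X_demo), gm.predict_proba(X_demo).argmax(axis=1))

X_new, y_new = gm.sample(n_samples=10)
assert X_new.shape == (10, 2) and y_new.shape == (10,)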
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + weighted_log_prob : array, shape (n_samples, n_component) + """ + return self._estimate_log_prob(X) + self._estimate_log_weights() + + @abstractmethod + def _estimate_log_weights(self): + """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm. + + Returns + ------- + log_weight : array, shape (n_components, ) + """ + pass + + @abstractmethod + def _estimate_log_prob(self, X): + """Estimate the log-probabilities log P(X | Z). + + Compute the log-probabilities per each component for each sample. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + log_prob : array, shape (n_samples, n_component) + """ + pass + + def _estimate_log_prob_resp(self, X): + """Estimate log probabilities and responsibilities for each sample. + + Compute the log probabilities, weighted log probabilities per + component and responsibilities for each sample in X with respect to + the current state of the model. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + Returns + ------- + log_prob_norm : array, shape (n_samples,) + log p(X) + + log_responsibilities : array, shape (n_samples, n_components) + logarithm of the responsibilities + """ + weighted_log_prob = self._estimate_weighted_log_prob(X) + log_prob_norm = logsumexp(weighted_log_prob, axis=1) + with np.errstate(under="ignore"): + # ignore underflow + log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis] + return log_prob_norm, log_resp + + def _print_verbose_msg_init_beg(self, n_init): + """Print verbose message on initialization.""" + if self.verbose == 1: + print("Initialization %d" % n_init) + elif self.verbose >= 2: + print("Initialization %d" % n_init) + self._init_prev_time = time() + self._iter_prev_time = self._init_prev_time + + def _print_verbose_msg_iter_end(self, n_iter, diff_ll): + """Print verbose message on initialization.""" + if n_iter % self.verbose_interval == 0: + if self.verbose == 1: + print(" Iteration %d" % n_iter) + elif self.verbose >= 2: + cur_time = time() + print( + " Iteration %d\t time lapse %.5fs\t ll change %.5f" + % (n_iter, cur_time - self._iter_prev_time, diff_ll) + ) + self._iter_prev_time = cur_time + + def _print_verbose_msg_init_end(self, lb, init_has_converged): + """Print verbose message on the end of iteration.""" + converged_msg = "converged" if init_has_converged else "did not converge" + if self.verbose == 1: + print(f"Initialization {converged_msg}.") + elif self.verbose >= 2: + t = time() - self._init_prev_time + print( + f"Initialization {converged_msg}. time lapse {t:.5f}s\t lower bound" + f" {lb:.5f}." 
+ ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..7de5cc844b098ebf9725d3530fc0a705e5096d0e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/_bayesian_mixture.py @@ -0,0 +1,887 @@ +"""Bayesian Gaussian Mixture Model.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import math +from numbers import Real + +import numpy as np +from scipy.special import betaln, digamma, gammaln + +from ..utils import check_array +from ..utils._param_validation import Interval, StrOptions +from ._base import BaseMixture, _check_shape +from ._gaussian_mixture import ( + _check_precision_matrix, + _check_precision_positivity, + _compute_log_det_cholesky, + _compute_precision_cholesky, + _estimate_gaussian_parameters, + _estimate_log_gaussian_prob, +) + + +def _log_dirichlet_norm(dirichlet_concentration): + """Compute the log of the Dirichlet distribution normalization term. + + Parameters + ---------- + dirichlet_concentration : array-like of shape (n_samples,) + The parameters values of the Dirichlet distribution. + + Returns + ------- + log_dirichlet_norm : float + The log normalization of the Dirichlet distribution. + """ + return gammaln(np.sum(dirichlet_concentration)) - np.sum( + gammaln(dirichlet_concentration) + ) + + +def _log_wishart_norm(degrees_of_freedom, log_det_precisions_chol, n_features): + """Compute the log of the Wishart distribution normalization term. + + Parameters + ---------- + degrees_of_freedom : array-like of shape (n_components,) + The number of degrees of freedom on the covariance Wishart + distributions. + + log_det_precision_chol : array-like of shape (n_components,) + The determinant of the precision matrix for each component. + + n_features : int + The number of features. + + Return + ------ + log_wishart_norm : array-like of shape (n_components,) + The log normalization of the Wishart distribution. + """ + # To simplify the computation we have removed the np.log(np.pi) term + return -( + degrees_of_freedom * log_det_precisions_chol + + degrees_of_freedom * n_features * 0.5 * math.log(2.0) + + np.sum( + gammaln(0.5 * (degrees_of_freedom - np.arange(n_features)[:, np.newaxis])), + 0, + ) + ) + + +class BayesianGaussianMixture(BaseMixture): + """Variational Bayesian estimation of a Gaussian mixture. + + This class allows to infer an approximate posterior distribution over the + parameters of a Gaussian mixture distribution. The effective number of + components can be inferred from the data. + + This class implements two types of prior for the weights distribution: a + finite mixture model with Dirichlet distribution and an infinite mixture + model with the Dirichlet Process. In practice Dirichlet Process inference + algorithm is approximated and uses a truncated distribution with a fixed + maximum number of components (called the Stick-breaking representation). + The number of components actually used almost always depends on the data. + + .. versionadded:: 0.18 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + n_components : int, default=1 + The number of mixture components. Depending on the data and the value + of the `weight_concentration_prior` the model can decide to not use + all the components by setting some component `weights_` to values very + close to zero. 
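# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# Illustration of the paragraph above: with more components than the data
# supports, the variational fit typically drives the surplus `weights_`
# towards zero, so the effective number of components is read off the weights
# rather than from `n_components` (exact values depend on data and priors):
import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.RandomState(2)
X_demo = np.vstack([rng.randn(100, 2), rng.randn(100, 2) + 6])   # two real clusters

bgm = BayesianGaussianMixture(n_components=5, max_iter=500, random_state=2).fit(X_demo)
print(np.round(np.sort(bgm.weights_)[::-1], 3))   # expect roughly two dominant weights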
The number of effective components is therefore smaller + than n_components. + + covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full' + String describing the type of covariance parameters to use. + Must be one of: + + - 'full' (each component has its own general covariance matrix), + - 'tied' (all components share the same general covariance matrix), + - 'diag' (each component has its own diagonal covariance matrix), + - 'spherical' (each component has its own single variance). + + tol : float, default=1e-3 + The convergence threshold. EM iterations will stop when the + lower bound average gain on the likelihood (of the training data with + respect to the model) is below this threshold. + + reg_covar : float, default=1e-6 + Non-negative regularization added to the diagonal of covariance. + Allows to assure that the covariance matrices are all positive. + + max_iter : int, default=100 + The number of EM iterations to perform. + + n_init : int, default=1 + The number of initializations to perform. The result with the highest + lower bound value on the likelihood is kept. + + init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \ + default='kmeans' + The method used to initialize the weights, the means and the + covariances. String must be one of: + + - 'kmeans': responsibilities are initialized using kmeans. + - 'k-means++': use the k-means++ method to initialize. + - 'random': responsibilities are initialized randomly. + - 'random_from_data': initial means are randomly selected data points. + + .. versionchanged:: v1.1 + `init_params` now accepts 'random_from_data' and 'k-means++' as + initialization methods. + + weight_concentration_prior_type : {'dirichlet_process', 'dirichlet_distribution'}, \ + default='dirichlet_process' + String describing the type of the weight concentration prior. + + weight_concentration_prior : float or None, default=None + The dirichlet concentration of each component on the weight + distribution (Dirichlet). This is commonly called gamma in the + literature. The higher concentration puts more mass in + the center and will lead to more components being active, while a lower + concentration parameter will lead to more mass at the edge of the + mixture weights simplex. The value of the parameter must be greater + than 0. If it is None, it's set to ``1. / n_components``. + + mean_precision_prior : float or None, default=None + The precision prior on the mean distribution (Gaussian). + Controls the extent of where means can be placed. Larger + values concentrate the cluster means around `mean_prior`. + The value of the parameter must be greater than 0. + If it is None, it is set to 1. + + mean_prior : array-like, shape (n_features,), default=None + The prior on the mean distribution (Gaussian). + If it is None, it is set to the mean of X. + + degrees_of_freedom_prior : float or None, default=None + The prior of the number of degrees of freedom on the covariance + distributions (Wishart). If it is None, it's set to `n_features`. + + covariance_prior : float or array-like, default=None + The prior on the covariance distribution (Wishart). + If it is None, the emiprical covariance prior is initialized using the + covariance of X. 
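# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# When the priors above are left at None they are derived from the data, as
# described: `mean_prior_` is X.mean(axis=0), `degrees_of_freedom_prior_` is
# n_features and, for the default 'full' covariance, `covariance_prior_` is the
# empirical covariance of X. A quick inspection on toy data:
import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.RandomState(3)
X_demo = rng.randn(50, 3)

bgm = BayesianGaussianMixture(n_components=2, random_state=3).fit(X_demo)
assert np.allclose(bgm.mean_prior_, X_demo.mean(axis=0))
assert bgm.degrees_of_freedom_prior_ == X_demo.shape[1]
assert np.allclose(bgm.covariance_prior_, np.atleast_2d(np.cov(X_demo.T)))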
The shape depends on `covariance_type`:: + + (n_features, n_features) if 'full', + (n_features, n_features) if 'tied', + (n_features) if 'diag', + float if 'spherical' + + random_state : int, RandomState instance or None, default=None + Controls the random seed given to the method chosen to initialize the + parameters (see `init_params`). + In addition, it controls the generation of random samples from the + fitted distribution (see the method `sample`). + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + If 'warm_start' is True, the solution of the last fitting is used as + initialization for the next call of fit(). This can speed up + convergence when fit is called several times on similar problems. + See :term:`the Glossary `. + + verbose : int, default=0 + Enable verbose output. If 1 then it prints the current + initialization and each iteration step. If greater than 1 then + it prints also the log probability and the time needed + for each step. + + verbose_interval : int, default=10 + Number of iteration done before the next print. + + Attributes + ---------- + weights_ : array-like of shape (n_components,) + The weights of each mixture components. + + means_ : array-like of shape (n_components, n_features) + The mean of each mixture component. + + covariances_ : array-like + The covariance of each mixture component. + The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_ : array-like + The precision matrices for each component in the mixture. A precision + matrix is the inverse of a covariance matrix. A covariance matrix is + symmetric positive definite so the mixture of Gaussian can be + equivalently parameterized by the precision matrices. Storing the + precision matrices instead of the covariance matrices makes it more + efficient to compute the log-likelihood of new samples at test time. + The shape depends on ``covariance_type``:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_cholesky_ : array-like + The cholesky decomposition of the precision matrices of each mixture + component. A precision matrix is the inverse of a covariance matrix. + A covariance matrix is symmetric positive definite so the mixture of + Gaussian can be equivalently parameterized by the precision matrices. + Storing the precision matrices instead of the covariance matrices makes + it more efficient to compute the log-likelihood of new samples at test + time. The shape depends on ``covariance_type``:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + converged_ : bool + True when convergence of the best fit of inference was reached, False otherwise. + + n_iter_ : int + Number of step used by the best fit of inference to reach the + convergence. + + lower_bound_ : float + Lower bound value on the model evidence (of the training data) of the + best fit of inference. + + weight_concentration_prior_ : tuple or float + The dirichlet concentration of each component on the weight + distribution (Dirichlet). 
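# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# The form of `weight_concentration_` follows the prior type documented above:
# a pair of Beta parameters under the stick-breaking Dirichlet process, a
# single Dirichlet parameter vector otherwise:
import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.RandomState(4)
X_demo = rng.randn(60, 2)

dp = BayesianGaussianMixture(
    n_components=3, weight_concentration_prior_type="dirichlet_process", random_state=4
).fit(X_demo)
dd = BayesianGaussianMixture(
    n_components=3, weight_concentration_prior_type="dirichlet_distribution", random_state=4
).fit(X_demo)

assert isinstance(dp.weight_concentration_, tuple) and len(dp.weight_concentration_) == 2
assert np.asarray(dd.weight_concentration_).shape == (3,)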
The type depends on + ``weight_concentration_prior_type``:: + + (float, float) if 'dirichlet_process' (Beta parameters), + float if 'dirichlet_distribution' (Dirichlet parameters). + + The higher concentration puts more mass in + the center and will lead to more components being active, while a lower + concentration parameter will lead to more mass at the edge of the + simplex. + + weight_concentration_ : array-like of shape (n_components,) + The dirichlet concentration of each component on the weight + distribution (Dirichlet). + + mean_precision_prior_ : float + The precision prior on the mean distribution (Gaussian). + Controls the extent of where means can be placed. + Larger values concentrate the cluster means around `mean_prior`. + If mean_precision_prior is set to None, `mean_precision_prior_` is set + to 1. + + mean_precision_ : array-like of shape (n_components,) + The precision of each components on the mean distribution (Gaussian). + + mean_prior_ : array-like of shape (n_features,) + The prior on the mean distribution (Gaussian). + + degrees_of_freedom_prior_ : float + The prior of the number of degrees of freedom on the covariance + distributions (Wishart). + + degrees_of_freedom_ : array-like of shape (n_components,) + The number of degrees of freedom of each components in the model. + + covariance_prior_ : float or array-like + The prior on the covariance distribution (Wishart). + The shape depends on `covariance_type`:: + + (n_features, n_features) if 'full', + (n_features, n_features) if 'tied', + (n_features) if 'diag', + float if 'spherical' + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + GaussianMixture : Finite Gaussian mixture fit with EM. + + References + ---------- + + .. [1] `Bishop, Christopher M. (2006). "Pattern recognition and machine + learning". Vol. 4 No. 4. New York: Springer. + `_ + + .. [2] `Hagai Attias. (2000). "A Variational Bayesian Framework for + Graphical Models". In Advances in Neural Information Processing + Systems 12. + `_ + + .. [3] `Blei, David M. and Michael I. Jordan. (2006). "Variational + inference for Dirichlet process mixtures". Bayesian analysis 1.1 + `_ + + Examples + -------- + >>> import numpy as np + >>> from sklearn.mixture import BayesianGaussianMixture + >>> X = np.array([[1, 2], [1, 4], [1, 0], [4, 2], [12, 4], [10, 7]]) + >>> bgm = BayesianGaussianMixture(n_components=2, random_state=42).fit(X) + >>> bgm.means_ + array([[2.49... , 2.29...], + [8.45..., 4.52... 
]]) + >>> bgm.predict([[0, 0], [9, 3]]) + array([0, 1]) + """ + + _parameter_constraints: dict = { + **BaseMixture._parameter_constraints, + "covariance_type": [StrOptions({"spherical", "tied", "diag", "full"})], + "weight_concentration_prior_type": [ + StrOptions({"dirichlet_process", "dirichlet_distribution"}) + ], + "weight_concentration_prior": [ + None, + Interval(Real, 0.0, None, closed="neither"), + ], + "mean_precision_prior": [None, Interval(Real, 0.0, None, closed="neither")], + "mean_prior": [None, "array-like"], + "degrees_of_freedom_prior": [None, Interval(Real, 0.0, None, closed="neither")], + "covariance_prior": [ + None, + "array-like", + Interval(Real, 0.0, None, closed="neither"), + ], + } + + def __init__( + self, + *, + n_components=1, + covariance_type="full", + tol=1e-3, + reg_covar=1e-6, + max_iter=100, + n_init=1, + init_params="kmeans", + weight_concentration_prior_type="dirichlet_process", + weight_concentration_prior=None, + mean_precision_prior=None, + mean_prior=None, + degrees_of_freedom_prior=None, + covariance_prior=None, + random_state=None, + warm_start=False, + verbose=0, + verbose_interval=10, + ): + super().__init__( + n_components=n_components, + tol=tol, + reg_covar=reg_covar, + max_iter=max_iter, + n_init=n_init, + init_params=init_params, + random_state=random_state, + warm_start=warm_start, + verbose=verbose, + verbose_interval=verbose_interval, + ) + + self.covariance_type = covariance_type + self.weight_concentration_prior_type = weight_concentration_prior_type + self.weight_concentration_prior = weight_concentration_prior + self.mean_precision_prior = mean_precision_prior + self.mean_prior = mean_prior + self.degrees_of_freedom_prior = degrees_of_freedom_prior + self.covariance_prior = covariance_prior + + def _check_parameters(self, X): + """Check that the parameters are well defined. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + self._check_weights_parameters() + self._check_means_parameters(X) + self._check_precision_parameters(X) + self._checkcovariance_prior_parameter(X) + + def _check_weights_parameters(self): + """Check the parameter of the Dirichlet distribution.""" + if self.weight_concentration_prior is None: + self.weight_concentration_prior_ = 1.0 / self.n_components + else: + self.weight_concentration_prior_ = self.weight_concentration_prior + + def _check_means_parameters(self, X): + """Check the parameters of the Gaussian distribution. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + _, n_features = X.shape + + if self.mean_precision_prior is None: + self.mean_precision_prior_ = 1.0 + else: + self.mean_precision_prior_ = self.mean_precision_prior + + if self.mean_prior is None: + self.mean_prior_ = X.mean(axis=0) + else: + self.mean_prior_ = check_array( + self.mean_prior, dtype=[np.float64, np.float32], ensure_2d=False + ) + _check_shape(self.mean_prior_, (n_features,), "means") + + def _check_precision_parameters(self, X): + """Check the prior parameters of the precision distribution. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + _, n_features = X.shape + + if self.degrees_of_freedom_prior is None: + self.degrees_of_freedom_prior_ = n_features + elif self.degrees_of_freedom_prior > n_features - 1.0: + self.degrees_of_freedom_prior_ = self.degrees_of_freedom_prior + else: + raise ValueError( + "The parameter 'degrees_of_freedom_prior' " + "should be greater than %d, but got %.3f." 
+ % (n_features - 1, self.degrees_of_freedom_prior) + ) + + def _checkcovariance_prior_parameter(self, X): + """Check the `covariance_prior_`. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + """ + _, n_features = X.shape + + if self.covariance_prior is None: + self.covariance_prior_ = { + "full": np.atleast_2d(np.cov(X.T)), + "tied": np.atleast_2d(np.cov(X.T)), + "diag": np.var(X, axis=0, ddof=1), + "spherical": np.var(X, axis=0, ddof=1).mean(), + }[self.covariance_type] + + elif self.covariance_type in ["full", "tied"]: + self.covariance_prior_ = check_array( + self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False + ) + _check_shape( + self.covariance_prior_, + (n_features, n_features), + "%s covariance_prior" % self.covariance_type, + ) + _check_precision_matrix(self.covariance_prior_, self.covariance_type) + elif self.covariance_type == "diag": + self.covariance_prior_ = check_array( + self.covariance_prior, dtype=[np.float64, np.float32], ensure_2d=False + ) + _check_shape( + self.covariance_prior_, + (n_features,), + "%s covariance_prior" % self.covariance_type, + ) + _check_precision_positivity(self.covariance_prior_, self.covariance_type) + # spherical case + else: + self.covariance_prior_ = self.covariance_prior + + def _initialize(self, X, resp): + """Initialization of the mixture parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + resp : array-like of shape (n_samples, n_components) + """ + nk, xk, sk = _estimate_gaussian_parameters( + X, resp, self.reg_covar, self.covariance_type + ) + + self._estimate_weights(nk) + self._estimate_means(nk, xk) + self._estimate_precisions(nk, xk, sk) + + def _estimate_weights(self, nk): + """Estimate the parameters of the Dirichlet distribution. + + Parameters + ---------- + nk : array-like of shape (n_components,) + """ + if self.weight_concentration_prior_type == "dirichlet_process": + # For dirichlet process weight_concentration will be a tuple + # containing the two parameters of the beta distribution + self.weight_concentration_ = ( + 1.0 + nk, + ( + self.weight_concentration_prior_ + + np.hstack((np.cumsum(nk[::-1])[-2::-1], 0)) + ), + ) + else: + # case Variational Gaussian mixture with dirichlet distribution + self.weight_concentration_ = self.weight_concentration_prior_ + nk + + def _estimate_means(self, nk, xk): + """Estimate the parameters of the Gaussian distribution. + + Parameters + ---------- + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + """ + self.mean_precision_ = self.mean_precision_prior_ + nk + self.means_ = ( + self.mean_precision_prior_ * self.mean_prior_ + nk[:, np.newaxis] * xk + ) / self.mean_precision_[:, np.newaxis] + + def _estimate_precisions(self, nk, xk, sk): + """Estimate the precisions parameters of the precision distribution. 
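# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# In `_estimate_weights` above, the second Beta parameter of the stick-breaking
# representation adds, for each component k, the soft counts of all components
# that come after k; the reversed-cumsum expression computes exactly those tail
# sums. A standalone check with a toy `nk`:
import numpy as np

nk = np.array([5.0, 3.0, 2.0, 1.0])
tail_counts = np.hstack((np.cumsum(nk[::-1])[-2::-1], 0))
assert np.allclose(tail_counts, [3 + 2 + 1, 2 + 1, 1, 0])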
+ + Parameters + ---------- + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like + The shape depends of `covariance_type`: + 'full' : (n_components, n_features, n_features) + 'tied' : (n_features, n_features) + 'diag' : (n_components, n_features) + 'spherical' : (n_components,) + """ + { + "full": self._estimate_wishart_full, + "tied": self._estimate_wishart_tied, + "diag": self._estimate_wishart_diag, + "spherical": self._estimate_wishart_spherical, + }[self.covariance_type](nk, xk, sk) + + self.precisions_cholesky_ = _compute_precision_cholesky( + self.covariances_, self.covariance_type + ) + + def _estimate_wishart_full(self, nk, xk, sk): + """Estimate the full Wishart distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_components, n_features, n_features) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` is + # the correct formula + self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk + + self.covariances_ = np.empty((self.n_components, n_features, n_features)) + + for k in range(self.n_components): + diff = xk[k] - self.mean_prior_ + self.covariances_[k] = ( + self.covariance_prior_ + + nk[k] * sk[k] + + nk[k] + * self.mean_precision_prior_ + / self.mean_precision_[k] + * np.outer(diff, diff) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis, np.newaxis] + + def _estimate_wishart_tied(self, nk, xk, sk): + """Estimate the tied Wishart distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_features, n_features) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` + # is the correct formula + self.degrees_of_freedom_ = ( + self.degrees_of_freedom_prior_ + nk.sum() / self.n_components + ) + + diff = xk - self.mean_prior_ + self.covariances_ = ( + self.covariance_prior_ + + sk * nk.sum() / self.n_components + + self.mean_precision_prior_ + / self.n_components + * np.dot((nk / self.mean_precision_) * diff.T, diff) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_ + + def _estimate_wishart_diag(self, nk, xk, sk): + """Estimate the diag Wishart distribution parameters. 
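# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# The Wishart updates above use the corrected Bishop (10.63) rule
# nu_k = nu_0 + N_k. Under the default 'dirichlet_process' prior, N_k can be
# read back from the first stick-breaking Beta parameter (which is 1 + N_k),
# so the relation can be checked on the fitted attributes:
import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.RandomState(5)
X_demo = rng.randn(80, 2)

bgm = BayesianGaussianMixture(n_components=3, max_iter=300, random_state=5).fit(X_demo)
nk = bgm.weight_concentration_[0] - 1.0          # N_k recovered from 1 + N_k
assert np.allclose(bgm.degrees_of_freedom_, bgm.degrees_of_freedom_prior_ + nk)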
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_components, n_features) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` + # is the correct formula + self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk + + diff = xk - self.mean_prior_ + self.covariances_ = self.covariance_prior_ + nk[:, np.newaxis] * ( + sk + + (self.mean_precision_prior_ / self.mean_precision_)[:, np.newaxis] + * np.square(diff) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_[:, np.newaxis] + + def _estimate_wishart_spherical(self, nk, xk, sk): + """Estimate the spherical Wishart distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + xk : array-like of shape (n_components, n_features) + + sk : array-like of shape (n_components,) + """ + _, n_features = xk.shape + + # Warning : in some Bishop book, there is a typo on the formula 10.63 + # `degrees_of_freedom_k = degrees_of_freedom_0 + Nk` + # is the correct formula + self.degrees_of_freedom_ = self.degrees_of_freedom_prior_ + nk + + diff = xk - self.mean_prior_ + self.covariances_ = self.covariance_prior_ + nk * ( + sk + + self.mean_precision_prior_ + / self.mean_precision_ + * np.mean(np.square(diff), 1) + ) + + # Contrary to the original bishop book, we normalize the covariances + self.covariances_ /= self.degrees_of_freedom_ + + def _m_step(self, X, log_resp): + """M step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array-like of shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + """ + n_samples, _ = X.shape + + nk, xk, sk = _estimate_gaussian_parameters( + X, np.exp(log_resp), self.reg_covar, self.covariance_type + ) + self._estimate_weights(nk) + self._estimate_means(nk, xk) + self._estimate_precisions(nk, xk, sk) + + def _estimate_log_weights(self): + if self.weight_concentration_prior_type == "dirichlet_process": + digamma_sum = digamma( + self.weight_concentration_[0] + self.weight_concentration_[1] + ) + digamma_a = digamma(self.weight_concentration_[0]) + digamma_b = digamma(self.weight_concentration_[1]) + return ( + digamma_a + - digamma_sum + + np.hstack((0, np.cumsum(digamma_b - digamma_sum)[:-1])) + ) + else: + # case Variational Gaussian mixture with dirichlet distribution + return digamma(self.weight_concentration_) - digamma( + np.sum(self.weight_concentration_) + ) + + def _estimate_log_prob(self, X): + _, n_features = X.shape + # We remove `n_features * np.log(self.degrees_of_freedom_)` because + # the precision matrix is normalized + log_gauss = _estimate_log_gaussian_prob( + X, self.means_, self.precisions_cholesky_, self.covariance_type + ) - 0.5 * n_features * np.log(self.degrees_of_freedom_) + + log_lambda = n_features * np.log(2.0) + np.sum( + digamma( + 0.5 + * (self.degrees_of_freedom_ - np.arange(0, n_features)[:, np.newaxis]) + ), + 0, + ) + + return log_gauss + 0.5 * (log_lambda - n_features / self.mean_precision_) + + def _compute_lower_bound(self, log_resp, log_prob_norm): + """Estimate the lower bound of the model. 
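# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# `_estimate_log_weights` above returns E[log pi_k] rather than log E[pi_k].
# For the Dirichlet-distribution branch this is digamma(alpha_k) - digamma(sum
# of alpha); by Jensen's inequality it always sits below the log of the
# posterior-mean weights. A toy numerical check:
import numpy as np
from scipy.special import digamma

alpha = np.array([2.0, 5.0, 3.0])                  # toy concentration parameters
e_log_pi = digamma(alpha) - digamma(alpha.sum())   # E[log pi_k]
log_mean_pi = np.log(alpha / alpha.sum())          # log E[pi_k]
assert np.all(e_log_pi < log_mean_pi)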
+ + The lower bound on the likelihood (of the training data with respect to + the model) is used to detect the convergence and has to increase at + each iteration. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array, shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. + + log_prob_norm : float + Logarithm of the probability of each sample in X. + + Returns + ------- + lower_bound : float + """ + # Contrary to the original formula, we have done some simplification + # and removed all the constant terms. + (n_features,) = self.mean_prior_.shape + + # We removed `.5 * n_features * np.log(self.degrees_of_freedom_)` + # because the precision matrix is normalized. + log_det_precisions_chol = _compute_log_det_cholesky( + self.precisions_cholesky_, self.covariance_type, n_features + ) - 0.5 * n_features * np.log(self.degrees_of_freedom_) + + if self.covariance_type == "tied": + log_wishart = self.n_components * np.float64( + _log_wishart_norm( + self.degrees_of_freedom_, log_det_precisions_chol, n_features + ) + ) + else: + log_wishart = np.sum( + _log_wishart_norm( + self.degrees_of_freedom_, log_det_precisions_chol, n_features + ) + ) + + if self.weight_concentration_prior_type == "dirichlet_process": + log_norm_weight = -np.sum( + betaln(self.weight_concentration_[0], self.weight_concentration_[1]) + ) + else: + log_norm_weight = _log_dirichlet_norm(self.weight_concentration_) + + return ( + -np.sum(np.exp(log_resp) * log_resp) + - log_wishart + - log_norm_weight + - 0.5 * n_features * np.sum(np.log(self.mean_precision_)) + ) + + def _get_parameters(self): + return ( + self.weight_concentration_, + self.mean_precision_, + self.means_, + self.degrees_of_freedom_, + self.covariances_, + self.precisions_cholesky_, + ) + + def _set_parameters(self, params): + ( + self.weight_concentration_, + self.mean_precision_, + self.means_, + self.degrees_of_freedom_, + self.covariances_, + self.precisions_cholesky_, + ) = params + + # Weights computation + if self.weight_concentration_prior_type == "dirichlet_process": + weight_dirichlet_sum = ( + self.weight_concentration_[0] + self.weight_concentration_[1] + ) + tmp = self.weight_concentration_[1] / weight_dirichlet_sum + self.weights_ = ( + self.weight_concentration_[0] + / weight_dirichlet_sum + * np.hstack((1, np.cumprod(tmp[:-1]))) + ) + self.weights_ /= np.sum(self.weights_) + else: + self.weights_ = self.weight_concentration_ / np.sum( + self.weight_concentration_ + ) + + # Precisions matrices computation + if self.covariance_type == "full": + self.precisions_ = np.array( + [ + np.dot(prec_chol, prec_chol.T) + for prec_chol in self.precisions_cholesky_ + ] + ) + + elif self.covariance_type == "tied": + self.precisions_ = np.dot( + self.precisions_cholesky_, self.precisions_cholesky_.T + ) + else: + self.precisions_ = self.precisions_cholesky_**2 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..98ade2089e273f9ad566c4bb2b70def279aba89c --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/_gaussian_mixture.py @@ -0,0 +1,911 @@ +"""Gaussian Mixture Model.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numpy as np +from scipy import linalg + +from ..utils import 
check_array +from ..utils._param_validation import StrOptions +from ..utils.extmath import row_norms +from ._base import BaseMixture, _check_shape + +############################################################################### +# Gaussian mixture shape checkers used by the GaussianMixture class + + +def _check_weights(weights, n_components): + """Check the user provided 'weights'. + + Parameters + ---------- + weights : array-like of shape (n_components,) + The proportions of components of each mixture. + + n_components : int + Number of components. + + Returns + ------- + weights : array, shape (n_components,) + """ + weights = check_array(weights, dtype=[np.float64, np.float32], ensure_2d=False) + _check_shape(weights, (n_components,), "weights") + + # check range + if any(np.less(weights, 0.0)) or any(np.greater(weights, 1.0)): + raise ValueError( + "The parameter 'weights' should be in the range " + "[0, 1], but got max value %.5f, min value %.5f" + % (np.min(weights), np.max(weights)) + ) + + # check normalization + if not np.allclose(np.abs(1.0 - np.sum(weights)), 0.0): + raise ValueError( + "The parameter 'weights' should be normalized, but got sum(weights) = %.5f" + % np.sum(weights) + ) + return weights + + +def _check_means(means, n_components, n_features): + """Validate the provided 'means'. + + Parameters + ---------- + means : array-like of shape (n_components, n_features) + The centers of the current components. + + n_components : int + Number of components. + + n_features : int + Number of features. + + Returns + ------- + means : array, (n_components, n_features) + """ + means = check_array(means, dtype=[np.float64, np.float32], ensure_2d=False) + _check_shape(means, (n_components, n_features), "means") + return means + + +def _check_precision_positivity(precision, covariance_type): + """Check a precision vector is positive-definite.""" + if np.any(np.less_equal(precision, 0.0)): + raise ValueError("'%s precision' should be positive" % covariance_type) + + +def _check_precision_matrix(precision, covariance_type): + """Check a precision matrix is symmetric and positive-definite.""" + if not ( + np.allclose(precision, precision.T) and np.all(linalg.eigvalsh(precision) > 0.0) + ): + raise ValueError( + "'%s precision' should be symmetric, positive-definite" % covariance_type + ) + + +def _check_precisions_full(precisions, covariance_type): + """Check the precision matrices are symmetric and positive-definite.""" + for prec in precisions: + _check_precision_matrix(prec, covariance_type) + + +def _check_precisions(precisions, covariance_type, n_components, n_features): + """Validate user provided precisions. + + Parameters + ---------- + precisions : array-like + 'full' : shape of (n_components, n_features, n_features) + 'tied' : shape of (n_features, n_features) + 'diag' : shape of (n_components, n_features) + 'spherical' : shape of (n_components,) + + covariance_type : str + + n_components : int + Number of components. + + n_features : int + Number of features. 
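# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# `_check_weights` above enforces that user-supplied initial weights lie in
# [0, 1] and sum to one; a violation surfaces as a ValueError when fitting the
# public estimator:
import numpy as np
import pytest
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(6)
X_demo = rng.randn(30, 2)

bad = GaussianMixture(n_components=2, weights_init=[0.9, 0.3])   # sums to 1.2
with pytest.raises(ValueError, match="should be normalized"):
    bad.fit(X_demo)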
+ + Returns + ------- + precisions : array + """ + precisions = check_array( + precisions, + dtype=[np.float64, np.float32], + ensure_2d=False, + allow_nd=covariance_type == "full", + ) + + precisions_shape = { + "full": (n_components, n_features, n_features), + "tied": (n_features, n_features), + "diag": (n_components, n_features), + "spherical": (n_components,), + } + _check_shape( + precisions, precisions_shape[covariance_type], "%s precision" % covariance_type + ) + + _check_precisions = { + "full": _check_precisions_full, + "tied": _check_precision_matrix, + "diag": _check_precision_positivity, + "spherical": _check_precision_positivity, + } + _check_precisions[covariance_type](precisions, covariance_type) + return precisions + + +############################################################################### +# Gaussian mixture parameters estimators (used by the M-Step) + + +def _estimate_gaussian_covariances_full(resp, X, nk, means, reg_covar): + """Estimate the full covariance matrices. + + Parameters + ---------- + resp : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + covariances : array, shape (n_components, n_features, n_features) + The covariance matrix of the current components. + """ + n_components, n_features = means.shape + covariances = np.empty((n_components, n_features, n_features)) + for k in range(n_components): + diff = X - means[k] + covariances[k] = np.dot(resp[:, k] * diff.T, diff) / nk[k] + covariances[k].flat[:: n_features + 1] += reg_covar + return covariances + + +def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar): + """Estimate the tied covariance matrix. + + Parameters + ---------- + resp : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + covariance : array, shape (n_features, n_features) + The tied covariance matrix of the components. + """ + avg_X2 = np.dot(X.T, X) + avg_means2 = np.dot(nk * means.T, means) + covariance = avg_X2 - avg_means2 + covariance /= nk.sum() + covariance.flat[:: len(covariance) + 1] += reg_covar + return covariance + + +def _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar): + """Estimate the diagonal covariance vectors. + + Parameters + ---------- + responsibilities : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + covariances : array, shape (n_components, n_features) + The covariance vector of the current components. + """ + avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis] + avg_means2 = means**2 + avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis] + return avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar + + +def _estimate_gaussian_covariances_spherical(resp, X, nk, means, reg_covar): + """Estimate the spherical variance values. 
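# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# As the code above shows, the "spherical" variances are simply the
# per-component mean of the "diag" covariance vectors. A direct check against
# the private helpers defined in this module (private API, illustration only):
import numpy as np
from sklearn.mixture._gaussian_mixture import (
    _estimate_gaussian_covariances_diag,
    _estimate_gaussian_covariances_spherical,
)

rng = np.random.RandomState(7)
X_demo = rng.randn(25, 3)
resp = rng.uniform(size=(25, 2))
resp /= resp.sum(axis=1, keepdims=True)
nk = resp.sum(axis=0)
means = resp.T @ X_demo / nk[:, np.newaxis]

diag = _estimate_gaussian_covariances_diag(resp, X_demo, nk, means, 1e-6)
spherical = _estimate_gaussian_covariances_spherical(resp, X_demo, nk, means, 1e-6)
assert np.allclose(spherical, diag.mean(axis=1))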
+ + Parameters + ---------- + responsibilities : array-like of shape (n_samples, n_components) + + X : array-like of shape (n_samples, n_features) + + nk : array-like of shape (n_components,) + + means : array-like of shape (n_components, n_features) + + reg_covar : float + + Returns + ------- + variances : array, shape (n_components,) + The variance values of each components. + """ + return _estimate_gaussian_covariances_diag(resp, X, nk, means, reg_covar).mean(1) + + +def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type): + """Estimate the Gaussian distribution parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input data array. + + resp : array-like of shape (n_samples, n_components) + The responsibilities for each data sample in X. + + reg_covar : float + The regularization added to the diagonal of the covariance matrices. + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + The type of precision matrices. + + Returns + ------- + nk : array-like of shape (n_components,) + The numbers of data samples in the current components. + + means : array-like of shape (n_components, n_features) + The centers of the current components. + + covariances : array-like + The covariance matrix of the current components. + The shape depends of the covariance_type. + """ + nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps + means = np.dot(resp.T, X) / nk[:, np.newaxis] + covariances = { + "full": _estimate_gaussian_covariances_full, + "tied": _estimate_gaussian_covariances_tied, + "diag": _estimate_gaussian_covariances_diag, + "spherical": _estimate_gaussian_covariances_spherical, + }[covariance_type](resp, X, nk, means, reg_covar) + return nk, means, covariances + + +def _compute_precision_cholesky(covariances, covariance_type): + """Compute the Cholesky decomposition of the precisions. + + Parameters + ---------- + covariances : array-like + The covariance matrix of the current components. + The shape depends of the covariance_type. + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + The type of precision matrices. + + Returns + ------- + precisions_cholesky : array-like + The cholesky decomposition of sample precisions of the current + components. The shape depends of the covariance_type. + """ + estimate_precision_error_message = ( + "Fitting the mixture model failed because some components have " + "ill-defined empirical covariance (for instance caused by singleton " + "or collapsed samples). Try to decrease the number of components, " + "or increase reg_covar." 
+ ) + + if covariance_type == "full": + n_components, n_features, _ = covariances.shape + precisions_chol = np.empty((n_components, n_features, n_features)) + for k, covariance in enumerate(covariances): + try: + cov_chol = linalg.cholesky(covariance, lower=True) + except linalg.LinAlgError: + raise ValueError(estimate_precision_error_message) + precisions_chol[k] = linalg.solve_triangular( + cov_chol, np.eye(n_features), lower=True + ).T + elif covariance_type == "tied": + _, n_features = covariances.shape + try: + cov_chol = linalg.cholesky(covariances, lower=True) + except linalg.LinAlgError: + raise ValueError(estimate_precision_error_message) + precisions_chol = linalg.solve_triangular( + cov_chol, np.eye(n_features), lower=True + ).T + else: + if np.any(np.less_equal(covariances, 0.0)): + raise ValueError(estimate_precision_error_message) + precisions_chol = 1.0 / np.sqrt(covariances) + return precisions_chol + + +def _flipudlr(array): + """Reverse the rows and columns of an array.""" + return np.flipud(np.fliplr(array)) + + +def _compute_precision_cholesky_from_precisions(precisions, covariance_type): + r"""Compute the Cholesky decomposition of precisions using precisions themselves. + + As implemented in :func:`_compute_precision_cholesky`, the `precisions_cholesky_` is + an upper-triangular matrix for each Gaussian component, which can be expressed as + the $UU^T$ factorization of the precision matrix for each Gaussian component, where + $U$ is an upper-triangular matrix. + + In order to use the Cholesky decomposition to get $UU^T$, the precision matrix + $\Lambda$ needs to be permutated such that its rows and columns are reversed, which + can be done by applying a similarity transformation with an exchange matrix $J$, + where the 1 elements reside on the anti-diagonal and all other elements are 0. In + particular, the Cholesky decomposition of the transformed precision matrix is + $J\Lambda J=LL^T$, where $L$ is a lower-triangular matrix. Because $\Lambda=UU^T$ + and $J=J^{-1}=J^T$, the `precisions_cholesky_` for each Gaussian component can be + expressed as $JLJ$. + + Refer to #26415 for details. + + Parameters + ---------- + precisions : array-like + The precision matrix of the current components. + The shape depends on the covariance_type. + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + The type of precision matrices. + + Returns + ------- + precisions_cholesky : array-like + The cholesky decomposition of sample precisions of the current + components. The shape depends on the covariance_type. + """ + if covariance_type == "full": + precisions_cholesky = np.array( + [ + _flipudlr(linalg.cholesky(_flipudlr(precision), lower=True)) + for precision in precisions + ] + ) + elif covariance_type == "tied": + precisions_cholesky = _flipudlr( + linalg.cholesky(_flipudlr(precisions), lower=True) + ) + else: + precisions_cholesky = np.sqrt(precisions) + return precisions_cholesky + + +############################################################################### +# Gaussian mixture probability estimators +def _compute_log_det_cholesky(matrix_chol, covariance_type, n_features): + """Compute the log-det of the cholesky decomposition of matrices. + + Parameters + ---------- + matrix_chol : array-like + Cholesky decompositions of the matrices. 
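# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# For the 'full' case handled above, each `precisions_cholesky_[k]` is a factor
# U with U @ U.T equal to the component precision, i.e. the inverse of the
# component covariance. A numerical check on a fitted model:
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(8)
X_demo = np.vstack([rng.randn(60, 2), rng.randn(60, 2) + 3])
gm = GaussianMixture(n_components=2, covariance_type="full", random_state=8).fit(X_demo)

for cov, prec_chol, prec in zip(gm.covariances_, gm.precisions_cholesky_, gm.precisions_):
    assert np.allclose(prec_chol @ prec_chol.T, prec)
    assert np.allclose(prec, np.linalg.inv(cov))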
+ 'full' : shape of (n_components, n_features, n_features) + 'tied' : shape of (n_features, n_features) + 'diag' : shape of (n_components, n_features) + 'spherical' : shape of (n_components,) + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + + n_features : int + Number of features. + + Returns + ------- + log_det_precision_chol : array-like of shape (n_components,) + The determinant of the precision matrix for each component. + """ + if covariance_type == "full": + n_components, _, _ = matrix_chol.shape + log_det_chol = np.sum( + np.log(matrix_chol.reshape(n_components, -1)[:, :: n_features + 1]), 1 + ) + + elif covariance_type == "tied": + log_det_chol = np.sum(np.log(np.diag(matrix_chol))) + + elif covariance_type == "diag": + log_det_chol = np.sum(np.log(matrix_chol), axis=1) + + else: + log_det_chol = n_features * (np.log(matrix_chol)) + + return log_det_chol + + +def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type): + """Estimate the log Gaussian probability. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + means : array-like of shape (n_components, n_features) + + precisions_chol : array-like + Cholesky decompositions of the precision matrices. + 'full' : shape of (n_components, n_features, n_features) + 'tied' : shape of (n_features, n_features) + 'diag' : shape of (n_components, n_features) + 'spherical' : shape of (n_components,) + + covariance_type : {'full', 'tied', 'diag', 'spherical'} + + Returns + ------- + log_prob : array, shape (n_samples, n_components) + """ + n_samples, n_features = X.shape + n_components, _ = means.shape + # The determinant of the precision matrix from the Cholesky decomposition + # corresponds to the negative half of the determinant of the full precision + # matrix. + # In short: det(precision_chol) = - det(precision) / 2 + log_det = _compute_log_det_cholesky(precisions_chol, covariance_type, n_features) + + if covariance_type == "full": + log_prob = np.empty((n_samples, n_components)) + for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)): + y = np.dot(X, prec_chol) - np.dot(mu, prec_chol) + log_prob[:, k] = np.sum(np.square(y), axis=1) + + elif covariance_type == "tied": + log_prob = np.empty((n_samples, n_components)) + for k, mu in enumerate(means): + y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol) + log_prob[:, k] = np.sum(np.square(y), axis=1) + + elif covariance_type == "diag": + precisions = precisions_chol**2 + log_prob = ( + np.sum((means**2 * precisions), 1) + - 2.0 * np.dot(X, (means * precisions).T) + + np.dot(X**2, precisions.T) + ) + + elif covariance_type == "spherical": + precisions = precisions_chol**2 + log_prob = ( + np.sum(means**2, 1) * precisions + - 2 * np.dot(X, means.T * precisions) + + np.outer(row_norms(X, squared=True), precisions) + ) + # Since we are using the precision of the Cholesky decomposition, + # `- 0.5 * log_det_precision` becomes `+ log_det_precision_chol` + return -0.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det + + +class GaussianMixture(BaseMixture): + """Gaussian Mixture. + + Representation of a Gaussian mixture model probability distribution. + This class allows to estimate the parameters of a Gaussian mixture + distribution. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.18 + + Parameters + ---------- + n_components : int, default=1 + The number of mixture components. 
+ + covariance_type : {'full', 'tied', 'diag', 'spherical'}, default='full' + String describing the type of covariance parameters to use. + Must be one of: + + - 'full': each component has its own general covariance matrix. + - 'tied': all components share the same general covariance matrix. + - 'diag': each component has its own diagonal covariance matrix. + - 'spherical': each component has its own single variance. + + tol : float, default=1e-3 + The convergence threshold. EM iterations will stop when the + lower bound average gain is below this threshold. + + reg_covar : float, default=1e-6 + Non-negative regularization added to the diagonal of covariance. + Allows to assure that the covariance matrices are all positive. + + max_iter : int, default=100 + The number of EM iterations to perform. + + n_init : int, default=1 + The number of initializations to perform. The best results are kept. + + init_params : {'kmeans', 'k-means++', 'random', 'random_from_data'}, \ + default='kmeans' + The method used to initialize the weights, the means and the + precisions. + String must be one of: + + - 'kmeans' : responsibilities are initialized using kmeans. + - 'k-means++' : use the k-means++ method to initialize. + - 'random' : responsibilities are initialized randomly. + - 'random_from_data' : initial means are randomly selected data points. + + .. versionchanged:: v1.1 + `init_params` now accepts 'random_from_data' and 'k-means++' as + initialization methods. + + weights_init : array-like of shape (n_components, ), default=None + The user-provided initial weights. + If it is None, weights are initialized using the `init_params` method. + + means_init : array-like of shape (n_components, n_features), default=None + The user-provided initial means, + If it is None, means are initialized using the `init_params` method. + + precisions_init : array-like, default=None + The user-provided initial precisions (inverse of the covariance + matrices). + If it is None, precisions are initialized using the 'init_params' + method. + The shape depends on 'covariance_type':: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + random_state : int, RandomState instance or None, default=None + Controls the random seed given to the method chosen to initialize the + parameters (see `init_params`). + In addition, it controls the generation of random samples from the + fitted distribution (see the method `sample`). + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + warm_start : bool, default=False + If 'warm_start' is True, the solution of the last fitting is used as + initialization for the next call of fit(). This can speed up + convergence when fit is called several times on similar problems. + In that case, 'n_init' is ignored and only a single initialization + occurs upon the first call. + See :term:`the Glossary `. + + verbose : int, default=0 + Enable verbose output. If 1 then it prints the current + initialization and each iteration step. If greater than 1 then + it prints also the log probability and the time needed + for each step. + + verbose_interval : int, default=10 + Number of iteration done before the next print. + + Attributes + ---------- + weights_ : array-like of shape (n_components,) + The weights of each mixture components. + + means_ : array-like of shape (n_components, n_features) + The mean of each mixture component. 
+ + covariances_ : array-like + The covariance of each mixture component. + The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_ : array-like + The precision matrices for each component in the mixture. A precision + matrix is the inverse of a covariance matrix. A covariance matrix is + symmetric positive definite so the mixture of Gaussian can be + equivalently parameterized by the precision matrices. Storing the + precision matrices instead of the covariance matrices makes it more + efficient to compute the log-likelihood of new samples at test time. + The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + precisions_cholesky_ : array-like + The cholesky decomposition of the precision matrices of each mixture + component. A precision matrix is the inverse of a covariance matrix. + A covariance matrix is symmetric positive definite so the mixture of + Gaussian can be equivalently parameterized by the precision matrices. + Storing the precision matrices instead of the covariance matrices makes + it more efficient to compute the log-likelihood of new samples at test + time. The shape depends on `covariance_type`:: + + (n_components,) if 'spherical', + (n_features, n_features) if 'tied', + (n_components, n_features) if 'diag', + (n_components, n_features, n_features) if 'full' + + converged_ : bool + True when convergence of the best fit of EM was reached, False otherwise. + + n_iter_ : int + Number of step used by the best fit of EM to reach the convergence. + + lower_bound_ : float + Lower bound value on the log-likelihood (of the training data with + respect to the model) of the best fit of EM. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + BayesianGaussianMixture : Gaussian mixture model fit with a variational + inference. 
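# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# The shapes listed above can be verified directly on fitted models; only
# `covariance_type` changes the layout of `covariances_` (and of `precisions_`):
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(11)
X_demo = rng.randn(60, 3)                               # 3 features

expected = {"spherical": (2,), "tied": (3, 3), "diag": (2, 3), "full": (2, 3, 3)}
for cov_type, shape in expected.items():
    gm = GaussianMixture(n_components=2, covariance_type=cov_type, random_state=11).fit(X_demo)
    assert gm.covariances_.shape == shape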
+ + Examples + -------- + >>> import numpy as np + >>> from sklearn.mixture import GaussianMixture + >>> X = np.array([[1, 2], [1, 4], [1, 0], [10, 2], [10, 4], [10, 0]]) + >>> gm = GaussianMixture(n_components=2, random_state=0).fit(X) + >>> gm.means_ + array([[10., 2.], + [ 1., 2.]]) + >>> gm.predict([[0, 0], [12, 3]]) + array([1, 0]) + """ + + _parameter_constraints: dict = { + **BaseMixture._parameter_constraints, + "covariance_type": [StrOptions({"full", "tied", "diag", "spherical"})], + "weights_init": ["array-like", None], + "means_init": ["array-like", None], + "precisions_init": ["array-like", None], + } + + def __init__( + self, + n_components=1, + *, + covariance_type="full", + tol=1e-3, + reg_covar=1e-6, + max_iter=100, + n_init=1, + init_params="kmeans", + weights_init=None, + means_init=None, + precisions_init=None, + random_state=None, + warm_start=False, + verbose=0, + verbose_interval=10, + ): + super().__init__( + n_components=n_components, + tol=tol, + reg_covar=reg_covar, + max_iter=max_iter, + n_init=n_init, + init_params=init_params, + random_state=random_state, + warm_start=warm_start, + verbose=verbose, + verbose_interval=verbose_interval, + ) + + self.covariance_type = covariance_type + self.weights_init = weights_init + self.means_init = means_init + self.precisions_init = precisions_init + + def _check_parameters(self, X): + """Check the Gaussian mixture parameters are well defined.""" + _, n_features = X.shape + + if self.weights_init is not None: + self.weights_init = _check_weights(self.weights_init, self.n_components) + + if self.means_init is not None: + self.means_init = _check_means( + self.means_init, self.n_components, n_features + ) + + if self.precisions_init is not None: + self.precisions_init = _check_precisions( + self.precisions_init, + self.covariance_type, + self.n_components, + n_features, + ) + + def _initialize_parameters(self, X, random_state): + # If all the initial parameters are all provided, then there is no need to run + # the initialization. + compute_resp = ( + self.weights_init is None + or self.means_init is None + or self.precisions_init is None + ) + if compute_resp: + super()._initialize_parameters(X, random_state) + else: + self._initialize(X, None) + + def _initialize(self, X, resp): + """Initialization of the Gaussian mixture parameters. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + resp : array-like of shape (n_samples, n_components) + """ + n_samples, _ = X.shape + weights, means, covariances = None, None, None + if resp is not None: + weights, means, covariances = _estimate_gaussian_parameters( + X, resp, self.reg_covar, self.covariance_type + ) + if self.weights_init is None: + weights /= n_samples + + self.weights_ = weights if self.weights_init is None else self.weights_init + self.means_ = means if self.means_init is None else self.means_init + + if self.precisions_init is None: + self.covariances_ = covariances + self.precisions_cholesky_ = _compute_precision_cholesky( + covariances, self.covariance_type + ) + else: + self.precisions_cholesky_ = _compute_precision_cholesky_from_precisions( + self.precisions_init, self.covariance_type + ) + + def _m_step(self, X, log_resp): + """M step. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + + log_resp : array-like of shape (n_samples, n_components) + Logarithm of the posterior probabilities (or responsibilities) of + the point of each sample in X. 
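# --- Illustrative sketch (not part of the vendored scikit-learn sources) -----
# As `_initialize_parameters` above shows, when the weights, means and
# precisions are all user-supplied there is nothing left to estimate from the
# data, so no responsibility matrix (and no k-means run) is needed. A sketch of
# that fully specified construction on toy data:
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(10)
X_demo = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 4])

gm = GaussianMixture(
    n_components=2,
    covariance_type="full",
    weights_init=[0.5, 0.5],
    means_init=[[0.0, 0.0], [4.0, 4.0]],
    precisions_init=np.array([np.eye(2), np.eye(2)]),   # inverse covariances
    random_state=10,
).fit(X_demo)
print(np.round(gm.means_, 2))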
+ """ + self.weights_, self.means_, self.covariances_ = _estimate_gaussian_parameters( + X, np.exp(log_resp), self.reg_covar, self.covariance_type + ) + self.weights_ /= self.weights_.sum() + self.precisions_cholesky_ = _compute_precision_cholesky( + self.covariances_, self.covariance_type + ) + + def _estimate_log_prob(self, X): + return _estimate_log_gaussian_prob( + X, self.means_, self.precisions_cholesky_, self.covariance_type + ) + + def _estimate_log_weights(self): + return np.log(self.weights_) + + def _compute_lower_bound(self, _, log_prob_norm): + return log_prob_norm + + def _get_parameters(self): + return ( + self.weights_, + self.means_, + self.covariances_, + self.precisions_cholesky_, + ) + + def _set_parameters(self, params): + ( + self.weights_, + self.means_, + self.covariances_, + self.precisions_cholesky_, + ) = params + + # Attributes computation + _, n_features = self.means_.shape + + if self.covariance_type == "full": + self.precisions_ = np.empty(self.precisions_cholesky_.shape) + for k, prec_chol in enumerate(self.precisions_cholesky_): + self.precisions_[k] = np.dot(prec_chol, prec_chol.T) + + elif self.covariance_type == "tied": + self.precisions_ = np.dot( + self.precisions_cholesky_, self.precisions_cholesky_.T + ) + else: + self.precisions_ = self.precisions_cholesky_**2 + + def _n_parameters(self): + """Return the number of free parameters in the model.""" + _, n_features = self.means_.shape + if self.covariance_type == "full": + cov_params = self.n_components * n_features * (n_features + 1) / 2.0 + elif self.covariance_type == "diag": + cov_params = self.n_components * n_features + elif self.covariance_type == "tied": + cov_params = n_features * (n_features + 1) / 2.0 + elif self.covariance_type == "spherical": + cov_params = self.n_components + mean_params = n_features * self.n_components + return int(cov_params + mean_params + self.n_components - 1) + + def bic(self, X): + """Bayesian information criterion for the current model on the input X. + + You can refer to this :ref:`mathematical section ` for more + details regarding the formulation of the BIC used. + + Parameters + ---------- + X : array of shape (n_samples, n_dimensions) + The input samples. + + Returns + ------- + bic : float + The lower the better. + """ + return -2 * self.score(X) * X.shape[0] + self._n_parameters() * np.log( + X.shape[0] + ) + + def aic(self, X): + """Akaike information criterion for the current model on the input X. + + You can refer to this :ref:`mathematical section ` for more + details regarding the formulation of the AIC used. + + Parameters + ---------- + X : array of shape (n_samples, n_dimensions) + The input samples. + + Returns + ------- + aic : float + The lower the better. 
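Editor's sketch, not part of the vendored file, making the two criteria above concrete: `score(X)` returns the mean per-sample log-likelihood, so the total log-likelihood is `score(X) * n_samples`, and `bic`/`aic` penalize it with the free-parameter count from the private `_n_parameters` helper shown earlier.

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = rng.randn(200, 3)
gm = GaussianMixture(n_components=2, random_state=0).fit(X)
log_likelihood = gm.score(X) * X.shape[0]  # total log-likelihood of X
p = gm._n_parameters()                     # number of free parameters
assert np.isclose(gm.bic(X), -2 * log_likelihood + p * np.log(X.shape[0]))
assert np.isclose(gm.aic(X), -2 * log_likelihood + 2 * p)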
+ """ + return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters() diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5bc92a7c44f1bfd87edace13f387a2727c6a539 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3b447a25c83143df5b420aa6d15fd3a8be8ff94 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_bayesian_mixture.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70c06d0966a6ef9a436ea1c3bdee7bb72dc47514 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_gaussian_mixture.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d12e6a188cb30b9f4890f6f6f2581b44890e2a91 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/__pycache__/test_mixture.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..d17e6710ee5a784e87457bc7a52dc9d68c1abba0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/test_bayesian_mixture.py @@ -0,0 +1,464 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import copy + +import numpy as np +import pytest +from scipy.special import gammaln + +from sklearn.exceptions import NotFittedError +from sklearn.metrics.cluster import adjusted_rand_score +from sklearn.mixture import BayesianGaussianMixture +from sklearn.mixture._bayesian_mixture import _log_dirichlet_norm, _log_wishart_norm +from sklearn.mixture.tests.test_gaussian_mixture import RandomData +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_equal, +) + +COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"] +PRIOR_TYPE = ["dirichlet_process", "dirichlet_distribution"] + + +def test_log_dirichlet_norm(): + rng = np.random.RandomState(0) + + weight_concentration = rng.rand(2) + 
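    # (editor's annotation, not part of the upstream file) The Dirichlet
    # normalization constant is Gamma(sum(alpha)) / prod_i Gamma(alpha_i), so its
    # log is gammaln(sum(alpha)) - sum(gammaln(alpha)); that is exactly what
    # expected_norm computes below before comparing with _log_dirichlet_norm.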
expected_norm = gammaln(np.sum(weight_concentration)) - np.sum( + gammaln(weight_concentration) + ) + predected_norm = _log_dirichlet_norm(weight_concentration) + + assert_almost_equal(expected_norm, predected_norm) + + +def test_log_wishart_norm(): + rng = np.random.RandomState(0) + + n_components, n_features = 5, 2 + degrees_of_freedom = np.abs(rng.rand(n_components)) + 1.0 + log_det_precisions_chol = n_features * np.log(range(2, 2 + n_components)) + + expected_norm = np.empty(5) + for k, (degrees_of_freedom_k, log_det_k) in enumerate( + zip(degrees_of_freedom, log_det_precisions_chol) + ): + expected_norm[k] = -( + degrees_of_freedom_k * (log_det_k + 0.5 * n_features * np.log(2.0)) + + np.sum( + gammaln( + 0.5 + * (degrees_of_freedom_k - np.arange(0, n_features)[:, np.newaxis]) + ), + 0, + ) + ).item() + predected_norm = _log_wishart_norm( + degrees_of_freedom, log_det_precisions_chol, n_features + ) + + assert_almost_equal(expected_norm, predected_norm) + + +def test_bayesian_mixture_weights_prior_initialisation(): + rng = np.random.RandomState(0) + n_samples, n_components, n_features = 10, 5, 2 + X = rng.rand(n_samples, n_features) + + # Check correct init for a given value of weight_concentration_prior + weight_concentration_prior = rng.rand() + bgmm = BayesianGaussianMixture( + weight_concentration_prior=weight_concentration_prior, random_state=rng + ).fit(X) + assert_almost_equal(weight_concentration_prior, bgmm.weight_concentration_prior_) + + # Check correct init for the default value of weight_concentration_prior + bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X) + assert_almost_equal(1.0 / n_components, bgmm.weight_concentration_prior_) + + +def test_bayesian_mixture_mean_prior_initialisation(): + rng = np.random.RandomState(0) + n_samples, n_components, n_features = 10, 3, 2 + X = rng.rand(n_samples, n_features) + + # Check correct init for a given value of mean_precision_prior + mean_precision_prior = rng.rand() + bgmm = BayesianGaussianMixture( + mean_precision_prior=mean_precision_prior, random_state=rng + ).fit(X) + assert_almost_equal(mean_precision_prior, bgmm.mean_precision_prior_) + + # Check correct init for the default value of mean_precision_prior + bgmm = BayesianGaussianMixture(random_state=rng).fit(X) + assert_almost_equal(1.0, bgmm.mean_precision_prior_) + + # Check correct init for a given value of mean_prior + mean_prior = rng.rand(n_features) + bgmm = BayesianGaussianMixture( + n_components=n_components, mean_prior=mean_prior, random_state=rng + ).fit(X) + assert_almost_equal(mean_prior, bgmm.mean_prior_) + + # Check correct init for the default value of bemean_priorta + bgmm = BayesianGaussianMixture(n_components=n_components, random_state=rng).fit(X) + assert_almost_equal(X.mean(axis=0), bgmm.mean_prior_) + + +def test_bayesian_mixture_precisions_prior_initialisation(): + rng = np.random.RandomState(0) + n_samples, n_features = 10, 2 + X = rng.rand(n_samples, n_features) + + # Check raise message for a bad value of degrees_of_freedom_prior + bad_degrees_of_freedom_prior_ = n_features - 1.0 + bgmm = BayesianGaussianMixture( + degrees_of_freedom_prior=bad_degrees_of_freedom_prior_, random_state=rng + ) + msg = ( + "The parameter 'degrees_of_freedom_prior' should be greater than" + f" {n_features -1}, but got {bad_degrees_of_freedom_prior_:.3f}." 
+ ) + with pytest.raises(ValueError, match=msg): + bgmm.fit(X) + + # Check correct init for a given value of degrees_of_freedom_prior + degrees_of_freedom_prior = rng.rand() + n_features - 1.0 + bgmm = BayesianGaussianMixture( + degrees_of_freedom_prior=degrees_of_freedom_prior, random_state=rng + ).fit(X) + assert_almost_equal(degrees_of_freedom_prior, bgmm.degrees_of_freedom_prior_) + + # Check correct init for the default value of degrees_of_freedom_prior + degrees_of_freedom_prior_default = n_features + bgmm = BayesianGaussianMixture( + degrees_of_freedom_prior=degrees_of_freedom_prior_default, random_state=rng + ).fit(X) + assert_almost_equal( + degrees_of_freedom_prior_default, bgmm.degrees_of_freedom_prior_ + ) + + # Check correct init for a given value of covariance_prior + covariance_prior = { + "full": np.cov(X.T, bias=1) + 10, + "tied": np.cov(X.T, bias=1) + 5, + "diag": np.diag(np.atleast_2d(np.cov(X.T, bias=1))) + 3, + "spherical": rng.rand(), + } + + bgmm = BayesianGaussianMixture(random_state=rng) + for cov_type in ["full", "tied", "diag", "spherical"]: + bgmm.covariance_type = cov_type + bgmm.covariance_prior = covariance_prior[cov_type] + bgmm.fit(X) + assert_almost_equal(covariance_prior[cov_type], bgmm.covariance_prior_) + + # Check correct init for the default value of covariance_prior + covariance_prior_default = { + "full": np.atleast_2d(np.cov(X.T)), + "tied": np.atleast_2d(np.cov(X.T)), + "diag": np.var(X, axis=0, ddof=1), + "spherical": np.var(X, axis=0, ddof=1).mean(), + } + + bgmm = BayesianGaussianMixture(random_state=0) + for cov_type in ["full", "tied", "diag", "spherical"]: + bgmm.covariance_type = cov_type + bgmm.fit(X) + assert_almost_equal(covariance_prior_default[cov_type], bgmm.covariance_prior_) + + +def test_bayesian_mixture_check_is_fitted(): + rng = np.random.RandomState(0) + n_samples, n_features = 10, 2 + + # Check raise message + bgmm = BayesianGaussianMixture(random_state=rng) + X = rng.rand(n_samples, n_features) + + msg = "This BayesianGaussianMixture instance is not fitted yet." 
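    # (editor's annotation, not part of the upstream file) NotFittedError subclasses
    # ValueError in sklearn.exceptions, so pytest.raises(ValueError, match=msg) below
    # also catches the NotFittedError raised by score() on an unfitted estimator.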
+ with pytest.raises(ValueError, match=msg): + bgmm.score(X) + + +def test_bayesian_mixture_weights(): + rng = np.random.RandomState(0) + n_samples, n_features = 10, 2 + + X = rng.rand(n_samples, n_features) + + # Case Dirichlet distribution for the weight concentration prior type + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type="dirichlet_distribution", + n_components=3, + random_state=rng, + ).fit(X) + + expected_weights = bgmm.weight_concentration_ / np.sum(bgmm.weight_concentration_) + assert_almost_equal(expected_weights, bgmm.weights_) + assert_almost_equal(np.sum(bgmm.weights_), 1.0) + + # Case Dirichlet process for the weight concentration prior type + dpgmm = BayesianGaussianMixture( + weight_concentration_prior_type="dirichlet_process", + n_components=3, + random_state=rng, + ).fit(X) + weight_dirichlet_sum = ( + dpgmm.weight_concentration_[0] + dpgmm.weight_concentration_[1] + ) + tmp = dpgmm.weight_concentration_[1] / weight_dirichlet_sum + expected_weights = ( + dpgmm.weight_concentration_[0] + / weight_dirichlet_sum + * np.hstack((1, np.cumprod(tmp[:-1]))) + ) + expected_weights /= np.sum(expected_weights) + assert_almost_equal(expected_weights, dpgmm.weights_) + assert_almost_equal(np.sum(dpgmm.weights_), 1.0) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +def test_monotonic_likelihood(): + # We check that each step of the each step of variational inference without + # regularization improve monotonically the training set of the bound + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=20) + n_components = rand_data.n_components + + for prior_type in PRIOR_TYPE: + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type=covar_type, + warm_start=True, + max_iter=1, + random_state=rng, + tol=1e-3, + ) + current_lower_bound = -np.inf + # Do one training iteration at a time so we can make sure that the + # training log likelihood increases after each iteration. + for _ in range(600): + prev_lower_bound = current_lower_bound + current_lower_bound = bgmm.fit(X).lower_bound_ + assert current_lower_bound >= prev_lower_bound + + if bgmm.converged_: + break + assert bgmm.converged_ + + +def test_compare_covar_type(): + # We can compare the 'full' precision with the other cov_type if we apply + # 1 iter of the M-step (done during _initialize_parameters). 
+ rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + X = rand_data.X["full"] + n_components = rand_data.n_components + + for prior_type in PRIOR_TYPE: + # Computation of the full_covariance + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="full", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + full_covariances = ( + bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis, np.newaxis] + ) + + # Check tied_covariance = mean(full_covariances, 0) + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="tied", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + + tied_covariance = bgmm.covariances_ * bgmm.degrees_of_freedom_ + assert_almost_equal(tied_covariance, np.mean(full_covariances, 0)) + + # Check diag_covariance = diag(full_covariances) + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="diag", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + + diag_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_[:, np.newaxis] + assert_almost_equal( + diag_covariances, np.array([np.diag(cov) for cov in full_covariances]) + ) + + # Check spherical_covariance = np.mean(diag_covariances, 0) + bgmm = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=2 * n_components, + covariance_type="spherical", + max_iter=1, + random_state=0, + tol=1e-7, + ) + bgmm._check_parameters(X) + bgmm._initialize_parameters(X, np.random.RandomState(0)) + + spherical_covariances = bgmm.covariances_ * bgmm.degrees_of_freedom_ + assert_almost_equal(spherical_covariances, np.mean(diag_covariances, 1)) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +def test_check_covariance_precision(): + # We check that the dot product of the covariance and the precision + # matrices is identity. 
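    # (editor's annotation, not part of the upstream file) For a symmetric
    # positive-definite covariance C the precision is P = C^{-1}, so C @ P must equal
    # the identity; for 'diag' and 'spherical' both quantities are diagonal (or
    # isotropic), which is why the check degenerates to an elementwise product of ones.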
+ rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components, n_features = 2 * rand_data.n_components, 2 + + # Computation of the full_covariance + bgmm = BayesianGaussianMixture( + n_components=n_components, max_iter=100, random_state=rng, tol=1e-3, reg_covar=0 + ) + for covar_type in COVARIANCE_TYPE: + bgmm.covariance_type = covar_type + bgmm.fit(rand_data.X[covar_type]) + + if covar_type == "full": + for covar, precision in zip(bgmm.covariances_, bgmm.precisions_): + assert_almost_equal(np.dot(covar, precision), np.eye(n_features)) + elif covar_type == "tied": + assert_almost_equal( + np.dot(bgmm.covariances_, bgmm.precisions_), np.eye(n_features) + ) + + elif covar_type == "diag": + assert_almost_equal( + bgmm.covariances_ * bgmm.precisions_, + np.ones((n_components, n_features)), + ) + + else: + assert_almost_equal( + bgmm.covariances_ * bgmm.precisions_, np.ones(n_components) + ) + + +def test_invariant_translation(): + # We check here that adding a constant in the data change correctly the + # parameters of the mixture + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=100) + n_components = 2 * rand_data.n_components + + for prior_type in PRIOR_TYPE: + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + bgmm1 = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=n_components, + max_iter=100, + random_state=0, + tol=1e-3, + reg_covar=0, + ).fit(X) + bgmm2 = BayesianGaussianMixture( + weight_concentration_prior_type=prior_type, + n_components=n_components, + max_iter=100, + random_state=0, + tol=1e-3, + reg_covar=0, + ).fit(X + 100) + + assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100) + assert_almost_equal(bgmm1.weights_, bgmm2.weights_) + assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_) + + +@pytest.mark.filterwarnings("ignore:.*did not converge.*") +@pytest.mark.parametrize( + "seed, max_iter, tol", + [ + (0, 2, 1e-7), # strict non-convergence + (1, 2, 1e-1), # loose non-convergence + (3, 300, 1e-7), # strict convergence + (4, 300, 1e-1), # loose convergence + ], +) +def test_bayesian_mixture_fit_predict(seed, max_iter, tol): + rng = np.random.RandomState(seed) + rand_data = RandomData(rng, n_samples=50, scale=7) + n_components = 2 * rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + bgmm1 = BayesianGaussianMixture( + n_components=n_components, + max_iter=max_iter, + random_state=rng, + tol=tol, + reg_covar=0, + ) + bgmm1.covariance_type = covar_type + bgmm2 = copy.deepcopy(bgmm1) + X = rand_data.X[covar_type] + + Y_pred1 = bgmm1.fit(X).predict(X) + Y_pred2 = bgmm2.fit_predict(X) + assert_array_equal(Y_pred1, Y_pred2) + + +def test_bayesian_mixture_fit_predict_n_init(): + # Check that fit_predict is equivalent to fit.predict, when n_init > 1 + X = np.random.RandomState(0).randn(50, 5) + gm = BayesianGaussianMixture(n_components=5, n_init=10, random_state=0) + y_pred1 = gm.fit_predict(X) + y_pred2 = gm.predict(X) + assert_array_equal(y_pred1, y_pred2) + + +def test_bayesian_mixture_predict_predict_proba(): + # this is the same test as test_gaussian_mixture_predict_predict_proba() + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + for prior_type in PRIOR_TYPE: + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + Y = rand_data.Y + bgmm = BayesianGaussianMixture( + n_components=rand_data.n_components, + random_state=rng, + weight_concentration_prior_type=prior_type, + covariance_type=covar_type, + ) + + # Check a warning message arrive 
if we don't do fit + msg = ( + "This BayesianGaussianMixture instance is not fitted yet. " + "Call 'fit' with appropriate arguments before using this " + "estimator." + ) + with pytest.raises(NotFittedError, match=msg): + bgmm.predict(X) + + bgmm.fit(X) + Y_pred = bgmm.predict(X) + Y_pred_proba = bgmm.predict_proba(X).argmax(axis=1) + assert_array_equal(Y_pred, Y_pred_proba) + assert adjusted_rand_score(Y, Y_pred) >= 0.95 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py new file mode 100644 index 0000000000000000000000000000000000000000..f6e3ffb4991f25a8b3babd220e763a1d1c7e98a9 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/test_gaussian_mixture.py @@ -0,0 +1,1420 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import copy +import itertools +import re +import sys +import warnings +from io import StringIO +from unittest.mock import Mock + +import numpy as np +import pytest +from scipy import linalg, stats + +import sklearn +from sklearn.cluster import KMeans +from sklearn.covariance import EmpiricalCovariance +from sklearn.datasets import make_spd_matrix +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.metrics.cluster import adjusted_rand_score +from sklearn.mixture import GaussianMixture +from sklearn.mixture._gaussian_mixture import ( + _compute_log_det_cholesky, + _compute_precision_cholesky, + _estimate_gaussian_covariances_diag, + _estimate_gaussian_covariances_full, + _estimate_gaussian_covariances_spherical, + _estimate_gaussian_covariances_tied, + _estimate_gaussian_parameters, +) +from sklearn.utils._testing import ( + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.extmath import fast_logdet + +COVARIANCE_TYPE = ["full", "tied", "diag", "spherical"] + + +def generate_data(n_samples, n_features, weights, means, precisions, covariance_type): + rng = np.random.RandomState(0) + + X = [] + if covariance_type == "spherical": + for _, (w, m, c) in enumerate(zip(weights, means, precisions["spherical"])): + X.append( + rng.multivariate_normal( + m, c * np.eye(n_features), int(np.round(w * n_samples)) + ) + ) + if covariance_type == "diag": + for _, (w, m, c) in enumerate(zip(weights, means, precisions["diag"])): + X.append( + rng.multivariate_normal(m, np.diag(c), int(np.round(w * n_samples))) + ) + if covariance_type == "tied": + for _, (w, m) in enumerate(zip(weights, means)): + X.append( + rng.multivariate_normal( + m, precisions["tied"], int(np.round(w * n_samples)) + ) + ) + if covariance_type == "full": + for _, (w, m, c) in enumerate(zip(weights, means, precisions["full"])): + X.append(rng.multivariate_normal(m, c, int(np.round(w * n_samples)))) + + X = np.vstack(X) + return X + + +class RandomData: + def __init__(self, rng, n_samples=200, n_components=2, n_features=2, scale=50): + self.n_samples = n_samples + self.n_components = n_components + self.n_features = n_features + + self.weights = rng.rand(n_components) + self.weights = self.weights / self.weights.sum() + self.means = rng.rand(n_components, n_features) * scale + self.covariances = { + "spherical": 0.5 + rng.rand(n_components), + "diag": (0.5 + rng.rand(n_components, n_features)) ** 2, + "tied": make_spd_matrix(n_features, random_state=rng), + "full": np.array( + [ + make_spd_matrix(n_features, 
random_state=rng) * 0.5 + for _ in range(n_components) + ] + ), + } + self.precisions = { + "spherical": 1.0 / self.covariances["spherical"], + "diag": 1.0 / self.covariances["diag"], + "tied": linalg.inv(self.covariances["tied"]), + "full": np.array( + [linalg.inv(covariance) for covariance in self.covariances["full"]] + ), + } + + self.X = dict( + zip( + COVARIANCE_TYPE, + [ + generate_data( + n_samples, + n_features, + self.weights, + self.means, + self.covariances, + covar_type, + ) + for covar_type in COVARIANCE_TYPE + ], + ) + ) + self.Y = np.hstack( + [ + np.full(int(np.round(w * n_samples)), k, dtype=int) + for k, w in enumerate(self.weights) + ] + ) + + +def test_gaussian_mixture_attributes(): + # test bad parameters + rng = np.random.RandomState(0) + X = rng.rand(10, 2) + + # test good parameters + n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1 + covariance_type, init_params = "full", "random" + gmm = GaussianMixture( + n_components=n_components, + tol=tol, + n_init=n_init, + max_iter=max_iter, + reg_covar=reg_covar, + covariance_type=covariance_type, + init_params=init_params, + ).fit(X) + + assert gmm.n_components == n_components + assert gmm.covariance_type == covariance_type + assert gmm.tol == tol + assert gmm.reg_covar == reg_covar + assert gmm.max_iter == max_iter + assert gmm.n_init == n_init + assert gmm.init_params == init_params + + +def test_check_weights(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + + n_components = rand_data.n_components + X = rand_data.X["full"] + + g = GaussianMixture(n_components=n_components) + + # Check bad shape + weights_bad_shape = rng.rand(n_components, 1) + g.weights_init = weights_bad_shape + msg = re.escape( + "The parameter 'weights' should have the shape of " + f"({n_components},), but got {str(weights_bad_shape.shape)}" + ) + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check bad range + weights_bad_range = rng.rand(n_components) + 1 + g.weights_init = weights_bad_range + msg = re.escape( + "The parameter 'weights' should be in the range [0, 1], but got" + f" max value {np.min(weights_bad_range):.5f}, " + f"min value {np.max(weights_bad_range):.5f}" + ) + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check bad normalization + weights_bad_norm = rng.rand(n_components) + weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1) + g.weights_init = weights_bad_norm + msg = re.escape( + "The parameter 'weights' should be normalized, " + f"but got sum(weights) = {np.sum(weights_bad_norm):.5f}" + ) + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check good weights matrix + weights = rand_data.weights + g = GaussianMixture(weights_init=weights, n_components=n_components) + g.fit(X) + assert_array_equal(weights, g.weights_init) + + +def test_check_means(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + + n_components, n_features = rand_data.n_components, rand_data.n_features + X = rand_data.X["full"] + + g = GaussianMixture(n_components=n_components) + + # Check means bad shape + means_bad_shape = rng.rand(n_components + 1, n_features) + g.means_init = means_bad_shape + msg = "The parameter 'means' should have the shape of " + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check good means matrix + means = rand_data.means + g.means_init = means + g.fit(X) + assert_array_equal(means, g.means_init) + + +def test_check_precisions(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + + n_components, 
n_features = rand_data.n_components, rand_data.n_features + + # Define the bad precisions for each covariance_type + precisions_bad_shape = { + "full": np.ones((n_components + 1, n_features, n_features)), + "tied": np.ones((n_features + 1, n_features + 1)), + "diag": np.ones((n_components + 1, n_features)), + "spherical": np.ones((n_components + 1)), + } + + # Define not positive-definite precisions + precisions_not_pos = np.ones((n_components, n_features, n_features)) + precisions_not_pos[0] = np.eye(n_features) + precisions_not_pos[0, 0, 0] = -1.0 + + precisions_not_positive = { + "full": precisions_not_pos, + "tied": precisions_not_pos[0], + "diag": np.full((n_components, n_features), -1.0), + "spherical": np.full(n_components, -1.0), + } + + not_positive_errors = { + "full": "symmetric, positive-definite", + "tied": "symmetric, positive-definite", + "diag": "positive", + "spherical": "positive", + } + + for covar_type in COVARIANCE_TYPE: + X = RandomData(rng).X[covar_type] + g = GaussianMixture( + n_components=n_components, covariance_type=covar_type, random_state=rng + ) + + # Check precisions with bad shapes + g.precisions_init = precisions_bad_shape[covar_type] + msg = f"The parameter '{covar_type} precision' should have the shape of" + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check not positive precisions + g.precisions_init = precisions_not_positive[covar_type] + msg = f"'{covar_type} precision' should be {not_positive_errors[covar_type]}" + with pytest.raises(ValueError, match=msg): + g.fit(X) + + # Check the correct init of precisions_init + g.precisions_init = rand_data.precisions[covar_type] + g.fit(X) + assert_array_equal(rand_data.precisions[covar_type], g.precisions_init) + + +def test_suffstat_sk_full(): + # compare the precision matrix compute from the + # EmpiricalCovariance.covariance fitted on X*sqrt(resp) + # with _sufficient_sk_full, n_components=1 + rng = np.random.RandomState(0) + n_samples, n_features = 500, 2 + + # special case 1, assuming data is "centered" + X = rng.rand(n_samples, n_features) + resp = rng.rand(n_samples, 1) + X_resp = np.sqrt(resp) * X + nk = np.array([n_samples]) + xk = np.zeros((1, n_features)) + covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + ecov = EmpiricalCovariance(assume_centered=True) + ecov.fit(X_resp) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred, "full") + precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) + precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) + assert_array_almost_equal(precs_est, precs_pred) + + # special case 2, assuming resp are all ones + resp = np.ones((n_samples, 1)) + nk = np.array([n_samples]) + xk = X.mean(axis=0).reshape((1, -1)) + covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + ecov = EmpiricalCovariance(assume_centered=False) + ecov.fit(X) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred, "full") + precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred]) + precs_est = np.array([linalg.inv(cov) for cov in covars_pred]) + assert_array_almost_equal(precs_est, precs_pred) + + +def 
test_suffstat_sk_tied(): + # use equation Nk * Sk / N = S_tied + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 500, 2, 2 + + resp = rng.rand(n_samples, n_components) + resp = resp / resp.sum(axis=1)[:, np.newaxis] + X = rng.rand(n_samples, n_features) + nk = resp.sum(axis=0) + xk = np.dot(resp.T, X) / nk[:, np.newaxis] + + covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + covars_pred_full = ( + np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full, 0) / n_samples + ) + + covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0) + + ecov = EmpiricalCovariance() + ecov.covariance_ = covars_pred_full + assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, "tied") + precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T) + precs_est = linalg.inv(covars_pred_tied) + assert_array_almost_equal(precs_est, precs_pred) + + +def test_suffstat_sk_diag(): + # test against 'full' case + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 500, 2, 2 + + resp = rng.rand(n_samples, n_components) + resp = resp / resp.sum(axis=1)[:, np.newaxis] + X = rng.rand(n_samples, n_features) + nk = resp.sum(axis=0) + xk = np.dot(resp.T, X) / nk[:, np.newaxis] + covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0) + covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0) + + ecov = EmpiricalCovariance() + for cov_full, cov_diag in zip(covars_pred_full, covars_pred_diag): + ecov.covariance_ = np.diag(np.diag(cov_full)) + cov_diag = np.diag(cov_diag) + assert_almost_equal(ecov.error_norm(cov_diag, norm="frobenius"), 0) + assert_almost_equal(ecov.error_norm(cov_diag, norm="spectral"), 0) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, "diag") + assert_almost_equal(covars_pred_diag, 1.0 / precs_chol_pred**2) + + +def test_gaussian_suffstat_sk_spherical(): + # computing spherical covariance equals to the variance of one-dimension + # data after flattening, n_components=1 + rng = np.random.RandomState(0) + n_samples, n_features = 500, 2 + + X = rng.rand(n_samples, n_features) + X = X - X.mean() + resp = np.ones((n_samples, 1)) + nk = np.array([n_samples]) + xk = X.mean() + covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0) + covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / ( + n_features * n_samples + ) + assert_almost_equal(covars_pred_spherical, covars_pred_spherical2) + + # check the precision computation + precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical, "spherical") + assert_almost_equal(covars_pred_spherical, 1.0 / precs_chol_pred**2) + + +def test_compute_log_det_cholesky(): + n_features = 2 + rand_data = RandomData(np.random.RandomState(0)) + + for covar_type in COVARIANCE_TYPE: + covariance = rand_data.covariances[covar_type] + + if covar_type == "full": + predected_det = np.array([linalg.det(cov) for cov in covariance]) + elif covar_type == "tied": + predected_det = linalg.det(covariance) + elif covar_type == "diag": + predected_det = np.array([np.prod(cov) for cov in covariance]) + elif covar_type == "spherical": + predected_det = covariance**n_features + + # We compute the cholesky decomposition of the covariance matrix + expected_det = _compute_log_det_cholesky( + 
_compute_precision_cholesky(covariance, covar_type), + covar_type, + n_features=n_features, + ) + assert_array_almost_equal(expected_det, -0.5 * np.log(predected_det)) + + +def _naive_lmvnpdf_diag(X, means, covars): + resp = np.empty((len(X), len(means))) + stds = np.sqrt(covars) + for i, (mean, std) in enumerate(zip(means, stds)): + resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1) + return resp + + +def test_gaussian_mixture_log_probabilities(): + from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob + + # test against with _naive_lmvnpdf_diag + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_samples = 500 + n_features = rand_data.n_features + n_components = rand_data.n_components + + means = rand_data.means + covars_diag = rng.rand(n_components, n_features) + X = rng.rand(n_samples, n_features) + log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag) + + # full covariances + precs_full = np.array([np.diag(1.0 / np.sqrt(x)) for x in covars_diag]) + + log_prob = _estimate_log_gaussian_prob(X, means, precs_full, "full") + assert_array_almost_equal(log_prob, log_prob_naive) + + # diag covariances + precs_chol_diag = 1.0 / np.sqrt(covars_diag) + log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, "diag") + assert_array_almost_equal(log_prob, log_prob_naive) + + # tied + covars_tied = np.array([x for x in covars_diag]).mean(axis=0) + precs_tied = np.diag(np.sqrt(1.0 / covars_tied)) + + log_prob_naive = _naive_lmvnpdf_diag(X, means, [covars_tied] * n_components) + log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, "tied") + + assert_array_almost_equal(log_prob, log_prob_naive) + + # spherical + covars_spherical = covars_diag.mean(axis=1) + precs_spherical = 1.0 / np.sqrt(covars_diag.mean(axis=1)) + log_prob_naive = _naive_lmvnpdf_diag( + X, means, [[k] * n_features for k in covars_spherical] + ) + log_prob = _estimate_log_gaussian_prob(X, means, precs_spherical, "spherical") + assert_array_almost_equal(log_prob, log_prob_naive) + + +# skip tests on weighted_log_probabilities, log_weights + + +def test_gaussian_mixture_estimate_log_prob_resp(): + # test whether responsibilities are normalized + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=5) + n_samples = rand_data.n_samples + n_features = rand_data.n_features + n_components = rand_data.n_components + + X = rng.rand(n_samples, n_features) + for covar_type in COVARIANCE_TYPE: + weights = rand_data.weights + means = rand_data.means + precisions = rand_data.precisions[covar_type] + g = GaussianMixture( + n_components=n_components, + random_state=rng, + weights_init=weights, + means_init=means, + precisions_init=precisions, + covariance_type=covar_type, + ) + g.fit(X) + resp = g.predict_proba(X) + assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples)) + assert_array_equal(g.weights_init, weights) + assert_array_equal(g.means_init, means) + assert_array_equal(g.precisions_init, precisions) + + +def test_gaussian_mixture_predict_predict_proba(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + Y = rand_data.Y + g = GaussianMixture( + n_components=rand_data.n_components, + random_state=rng, + weights_init=rand_data.weights, + means_init=rand_data.means, + precisions_init=rand_data.precisions[covar_type], + covariance_type=covar_type, + ) + + # Check a warning message arrive if we don't do fit + msg = ( + "This GaussianMixture instance is not fitted yet. 
Call 'fit' " + "with appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=msg): + g.predict(X) + + g.fit(X) + Y_pred = g.predict(X) + Y_pred_proba = g.predict_proba(X).argmax(axis=1) + assert_array_equal(Y_pred, Y_pred_proba) + assert adjusted_rand_score(Y, Y_pred) > 0.95 + + +@pytest.mark.filterwarnings("ignore:.*did not converge.*") +@pytest.mark.parametrize( + "seed, max_iter, tol", + [ + (0, 2, 1e-7), # strict non-convergence + (1, 2, 1e-1), # loose non-convergence + (3, 300, 1e-7), # strict convergence + (4, 300, 1e-1), # loose convergence + ], +) +def test_gaussian_mixture_fit_predict(seed, max_iter, tol): + rng = np.random.RandomState(seed) + rand_data = RandomData(rng) + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + Y = rand_data.Y + g = GaussianMixture( + n_components=rand_data.n_components, + random_state=rng, + weights_init=rand_data.weights, + means_init=rand_data.means, + precisions_init=rand_data.precisions[covar_type], + covariance_type=covar_type, + max_iter=max_iter, + tol=tol, + ) + + # check if fit_predict(X) is equivalent to fit(X).predict(X) + f = copy.deepcopy(g) + Y_pred1 = f.fit(X).predict(X) + Y_pred2 = g.fit_predict(X) + assert_array_equal(Y_pred1, Y_pred2) + assert adjusted_rand_score(Y, Y_pred2) > 0.95 + + +def test_gaussian_mixture_fit_predict_n_init(): + # Check that fit_predict is equivalent to fit.predict, when n_init > 1 + X = np.random.RandomState(0).randn(1000, 5) + gm = GaussianMixture(n_components=5, n_init=5, random_state=0) + y_pred1 = gm.fit_predict(X) + y_pred2 = gm.predict(X) + assert_array_equal(y_pred1, y_pred2) + + +def test_gaussian_mixture_fit(): + # recover the ground truth + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_features = rand_data.n_features + n_components = rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=20, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + g.fit(X) + + # needs more data to pass the test with rtol=1e-7 + assert_allclose( + np.sort(g.weights_), np.sort(rand_data.weights), rtol=0.1, atol=1e-2 + ) + + arg_idx1 = g.means_[:, 0].argsort() + arg_idx2 = rand_data.means[:, 0].argsort() + assert_allclose( + g.means_[arg_idx1], rand_data.means[arg_idx2], rtol=0.1, atol=1e-2 + ) + + if covar_type == "full": + prec_pred = g.precisions_ + prec_test = rand_data.precisions["full"] + elif covar_type == "tied": + prec_pred = np.array([g.precisions_] * n_components) + prec_test = np.array([rand_data.precisions["tied"]] * n_components) + elif covar_type == "spherical": + prec_pred = np.array([np.eye(n_features) * c for c in g.precisions_]) + prec_test = np.array( + [np.eye(n_features) * c for c in rand_data.precisions["spherical"]] + ) + elif covar_type == "diag": + prec_pred = np.array([np.diag(d) for d in g.precisions_]) + prec_test = np.array([np.diag(d) for d in rand_data.precisions["diag"]]) + + arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort() + arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort() + for k, h in zip(arg_idx1, arg_idx2): + ecov = EmpiricalCovariance() + ecov.covariance_ = prec_test[h] + # the accuracy depends on the number of data and randomness, rng + assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.15) + + +def test_gaussian_mixture_fit_best_params(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_components = rand_data.n_components + n_init = 10 + for 
covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + ll = [] + for _ in range(n_init): + g.fit(X) + ll.append(g.score(X)) + ll = np.array(ll) + g_best = GaussianMixture( + n_components=n_components, + n_init=n_init, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + g_best.fit(X) + assert_almost_equal(ll.min(), g_best.score(X)) + + +def test_gaussian_mixture_fit_convergence_warning(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=1) + n_components = rand_data.n_components + max_iter = 1 + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=max_iter, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + msg = ( + "Best performing initialization did not converge. " + "Try different init parameters, or increase max_iter, " + "tol, or check for degenerate data." + ) + with pytest.warns(ConvergenceWarning, match=msg): + g.fit(X) + + +def test_multiple_init(): + # Test that multiple inits does not much worse than a single one + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 50, 5, 2 + X = rng.randn(n_samples, n_features) + for cv_type in COVARIANCE_TYPE: + train1 = ( + GaussianMixture( + n_components=n_components, covariance_type=cv_type, random_state=0 + ) + .fit(X) + .score(X) + ) + train2 = ( + GaussianMixture( + n_components=n_components, + covariance_type=cv_type, + random_state=0, + n_init=5, + ) + .fit(X) + .score(X) + ) + assert train2 >= train1 + + +def test_gaussian_mixture_n_parameters(): + # Test that the right number of parameters is estimated + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 50, 5, 2 + X = rng.randn(n_samples, n_features) + n_params = {"spherical": 13, "diag": 21, "tied": 26, "full": 41} + for cv_type in COVARIANCE_TYPE: + g = GaussianMixture( + n_components=n_components, covariance_type=cv_type, random_state=rng + ).fit(X) + assert g._n_parameters() == n_params[cv_type] + + +def test_bic_1d_1component(): + # Test all of the covariance_types return the same BIC score for + # 1-dimensional, 1 component fits. 
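    # (editor's annotation, not part of the upstream file) With n_dim=1 and a single
    # component every covariance_type has exactly one covariance parameter, so
    # _n_parameters() is 2 (one mean, one variance, and k - 1 = 0 weights) and the
    # fitted model, its likelihood and hence its BIC coincide across types. The
    # counts asserted in test_gaussian_mixture_n_parameters above follow the same
    # formula with n_features=5, n_components=2, e.g. 'full':
    # 2*5*6/2 + 2*5 + (2 - 1) = 30 + 10 + 1 = 41.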
+ rng = np.random.RandomState(0) + n_samples, n_dim, n_components = 100, 1, 1 + X = rng.randn(n_samples, n_dim) + bic_full = ( + GaussianMixture( + n_components=n_components, covariance_type="full", random_state=rng + ) + .fit(X) + .bic(X) + ) + for covariance_type in ["tied", "diag", "spherical"]: + bic = ( + GaussianMixture( + n_components=n_components, + covariance_type=covariance_type, + random_state=rng, + ) + .fit(X) + .bic(X) + ) + assert_almost_equal(bic_full, bic) + + +def test_gaussian_mixture_aic_bic(): + # Test the aic and bic criteria + rng = np.random.RandomState(0) + n_samples, n_features, n_components = 50, 3, 2 + X = rng.randn(n_samples, n_features) + # standard gaussian entropy + sgh = 0.5 * ( + fast_logdet(np.cov(X.T, bias=1)) + n_features * (1 + np.log(2 * np.pi)) + ) + for cv_type in COVARIANCE_TYPE: + g = GaussianMixture( + n_components=n_components, + covariance_type=cv_type, + random_state=rng, + max_iter=200, + ) + g.fit(X) + aic = 2 * n_samples * sgh + 2 * g._n_parameters() + bic = 2 * n_samples * sgh + np.log(n_samples) * g._n_parameters() + bound = n_features / np.sqrt(n_samples) + assert (g.aic(X) - aic) / n_samples < bound + assert (g.bic(X) - bic) / n_samples < bound + + +def test_gaussian_mixture_verbose(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_components = rand_data.n_components + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + g = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + verbose=1, + ) + h = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + verbose=2, + ) + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + g.fit(X) + h.fit(X) + finally: + sys.stdout = old_stdout + + +@pytest.mark.filterwarnings("ignore:.*did not converge.*") +@pytest.mark.parametrize("seed", (0, 1, 2)) +def test_warm_start(seed): + random_state = seed + rng = np.random.RandomState(random_state) + n_samples, n_features, n_components = 500, 2, 2 + X = rng.rand(n_samples, n_features) + + # Assert the warm_start give the same result for the same number of iter + g = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=2, + reg_covar=0, + random_state=random_state, + warm_start=False, + ) + h = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=1, + reg_covar=0, + random_state=random_state, + warm_start=True, + ) + + g.fit(X) + score1 = h.fit(X).score(X) + score2 = h.fit(X).score(X) + + assert_almost_equal(g.weights_, h.weights_) + assert_almost_equal(g.means_, h.means_) + assert_almost_equal(g.precisions_, h.precisions_) + assert score2 > score1 + + # Assert that by using warm_start we can converge to a good solution + g = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=5, + reg_covar=0, + random_state=random_state, + warm_start=False, + tol=1e-6, + ) + h = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=5, + reg_covar=0, + random_state=random_state, + warm_start=True, + tol=1e-6, + ) + + g.fit(X) + assert not g.converged_ + + h.fit(X) + # depending on the data there is large variability in the number of + # refit necessary to converge due to the complete randomness of the + # data + for _ in range(1000): + h.fit(X) + if h.converged_: + break + assert h.converged_ + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +def test_convergence_detected_with_warm_start(): + # We check 
that convergence is detected when warm_start=True + rng = np.random.RandomState(0) + rand_data = RandomData(rng) + n_components = rand_data.n_components + X = rand_data.X["full"] + + for max_iter in (1, 2, 50): + gmm = GaussianMixture( + n_components=n_components, + warm_start=True, + max_iter=max_iter, + random_state=rng, + ) + for _ in range(100): + gmm.fit(X) + if gmm.converged_: + break + assert gmm.converged_ + assert max_iter >= gmm.n_iter_ + + +def test_score(): + covar_type = "full" + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + X = rand_data.X[covar_type] + + # Check the error message if we don't call fit + gmm1 = GaussianMixture( + n_components=n_components, + n_init=1, + max_iter=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + msg = ( + "This GaussianMixture instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=msg): + gmm1.score(X) + + # Check score value + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + gmm1.fit(X) + gmm_score = gmm1.score(X) + gmm_score_proba = gmm1.score_samples(X).mean() + assert_almost_equal(gmm_score, gmm_score_proba) + + # Check if the score increase + gmm2 = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ).fit(X) + assert gmm2.score(X) > gmm1.score(X) + + +def test_score_samples(): + covar_type = "full" + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + X = rand_data.X[covar_type] + + # Check the error message if we don't call fit + gmm = GaussianMixture( + n_components=n_components, + n_init=1, + reg_covar=0, + random_state=rng, + covariance_type=covar_type, + ) + msg = ( + "This GaussianMixture instance is not fitted yet. Call 'fit' with " + "appropriate arguments before using this estimator." + ) + with pytest.raises(NotFittedError, match=msg): + gmm.score_samples(X) + + gmm_score_samples = gmm.fit(X).score_samples(X) + assert gmm_score_samples.shape[0] == rand_data.n_samples + + +def test_monotonic_likelihood(): + # We check that each step of the EM without regularization improve + # monotonically the training set likelihood + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + gmm = GaussianMixture( + n_components=n_components, + covariance_type=covar_type, + reg_covar=0, + warm_start=True, + max_iter=1, + random_state=rng, + tol=1e-7, + ) + current_log_likelihood = -np.inf + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ConvergenceWarning) + # Do one training iteration at a time so we can make sure that the + # training log likelihood increases after each iteration. + for _ in range(600): + prev_log_likelihood = current_log_likelihood + current_log_likelihood = gmm.fit(X).score(X) + assert current_log_likelihood >= prev_log_likelihood + + if gmm.converged_: + break + + assert gmm.converged_ + + +def test_regularisation(): + # We train the GaussianMixture on degenerate data by defining two clusters + # of a 0 covariance. 
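    # (editor's annotation, not part of the upstream file) Both clusters below are
    # perfectly collapsed (all-ones and all-zeros rows), so the empirical covariances
    # are exactly zero and computing the precision Cholesky fails when reg_covar=0;
    # reg_covar adds a non-negative constant to the diagonal of the covariances,
    # which is why the refit with set_params(reg_covar=1e-6) succeeds.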
+ rng = np.random.RandomState(0) + n_samples, n_features = 10, 5 + + X = np.vstack( + (np.ones((n_samples // 2, n_features)), np.zeros((n_samples // 2, n_features))) + ) + + for covar_type in COVARIANCE_TYPE: + gmm = GaussianMixture( + n_components=n_samples, + reg_covar=0, + covariance_type=covar_type, + random_state=rng, + ) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + msg = re.escape( + "Fitting the mixture model failed because some components have" + " ill-defined empirical covariance (for instance caused by " + "singleton or collapsed samples). Try to decrease the number " + "of components, or increase reg_covar." + ) + with pytest.raises(ValueError, match=msg): + gmm.fit(X) + + gmm.set_params(reg_covar=1e-6).fit(X) + + +def test_property(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7) + n_components = rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + gmm = GaussianMixture( + n_components=n_components, + covariance_type=covar_type, + random_state=rng, + n_init=5, + ) + gmm.fit(X) + if covar_type == "full": + for prec, covar in zip(gmm.precisions_, gmm.covariances_): + assert_array_almost_equal(linalg.inv(prec), covar) + elif covar_type == "tied": + assert_array_almost_equal(linalg.inv(gmm.precisions_), gmm.covariances_) + else: + assert_array_almost_equal(gmm.precisions_, 1.0 / gmm.covariances_) + + +def test_sample(): + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=7, n_components=3) + n_features, n_components = rand_data.n_features, rand_data.n_components + + for covar_type in COVARIANCE_TYPE: + X = rand_data.X[covar_type] + + gmm = GaussianMixture( + n_components=n_components, covariance_type=covar_type, random_state=rng + ) + # To sample we need that GaussianMixture is fitted + msg = "This GaussianMixture instance is not fitted" + with pytest.raises(NotFittedError, match=msg): + gmm.sample(0) + gmm.fit(X) + + msg = "Invalid value for 'n_samples'" + with pytest.raises(ValueError, match=msg): + gmm.sample(0) + + # Just to make sure the class samples correctly + n_samples = 20000 + X_s, y_s = gmm.sample(n_samples) + + for k in range(n_components): + if covar_type == "full": + assert_array_almost_equal( + gmm.covariances_[k], np.cov(X_s[y_s == k].T), decimal=1 + ) + elif covar_type == "tied": + assert_array_almost_equal( + gmm.covariances_, np.cov(X_s[y_s == k].T), decimal=1 + ) + elif covar_type == "diag": + assert_array_almost_equal( + gmm.covariances_[k], np.diag(np.cov(X_s[y_s == k].T)), decimal=1 + ) + else: + assert_array_almost_equal( + gmm.covariances_[k], + np.var(X_s[y_s == k] - gmm.means_[k]), + decimal=1, + ) + + means_s = np.array([np.mean(X_s[y_s == k], 0) for k in range(n_components)]) + assert_array_almost_equal(gmm.means_, means_s, decimal=1) + + # Check shapes of sampled data, see + # https://github.com/scikit-learn/scikit-learn/issues/7701 + assert X_s.shape == (n_samples, n_features) + + for sample_size in range(1, 100): + X_s, _ = gmm.sample(sample_size) + assert X_s.shape == (sample_size, n_features) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +def test_init(): + # We check that by increasing the n_init number we have a better solution + for random_state in range(15): + rand_data = RandomData( + np.random.RandomState(random_state), n_samples=50, scale=1 + ) + n_components = rand_data.n_components + X = rand_data.X["full"] + + gmm1 = GaussianMixture( + n_components=n_components, n_init=1, 
max_iter=1, random_state=random_state + ).fit(X) + gmm2 = GaussianMixture( + n_components=n_components, n_init=10, max_iter=1, random_state=random_state + ).fit(X) + + assert gmm2.lower_bound_ >= gmm1.lower_bound_ + + +def test_gaussian_mixture_setting_best_params(): + """`GaussianMixture`'s best_parameters, `n_iter_` and `lower_bound_` + must be set appropriately in the case of divergence. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/18216 + """ + rnd = np.random.RandomState(0) + n_samples = 30 + X = rnd.uniform(size=(n_samples, 3)) + + # following initialization parameters were found to lead to divergence + means_init = np.array( + [ + [0.670637869618158, 0.21038256107384043, 0.12892629765485303], + [0.09394051075844147, 0.5759464955561779, 0.929296197576212], + [0.5033230372781258, 0.9569852381759425, 0.08654043447295741], + [0.18578301420435747, 0.5531158970919143, 0.19388943970532435], + [0.4548589928173794, 0.35182513658825276, 0.568146063202464], + [0.609279894978321, 0.7929063819678847, 0.9620097270828052], + ] + ) + precisions_init = np.array( + [ + 999999.999604483, + 999999.9990869573, + 553.7603944542167, + 204.78596008931834, + 15.867423501783637, + 85.4595728389735, + ] + ) + weights_init = [ + 0.03333333333333341, + 0.03333333333333341, + 0.06666666666666674, + 0.06666666666666674, + 0.7000000000000001, + 0.10000000000000007, + ] + + gmm = GaussianMixture( + covariance_type="spherical", + reg_covar=0, + means_init=means_init, + weights_init=weights_init, + random_state=rnd, + n_components=len(weights_init), + precisions_init=precisions_init, + max_iter=1, + ) + # ensure that no error is thrown during fit + gmm.fit(X) + + # check that the fit did not converge + assert not gmm.converged_ + + # check that parameters are set for gmm + for attr in [ + "weights_", + "means_", + "covariances_", + "precisions_cholesky_", + "n_iter_", + "lower_bound_", + ]: + assert hasattr(gmm, attr) + + +@pytest.mark.parametrize( + "init_params", ["random", "random_from_data", "k-means++", "kmeans"] +) +def test_init_means_not_duplicated(init_params, global_random_seed): + # Check that all initialisations provide not duplicated starting means + rng = np.random.RandomState(global_random_seed) + rand_data = RandomData(rng, scale=5) + n_components = rand_data.n_components + X = rand_data.X["full"] + + gmm = GaussianMixture( + n_components=n_components, init_params=init_params, random_state=rng, max_iter=0 + ) + gmm.fit(X) + + means = gmm.means_ + for i_mean, j_mean in itertools.combinations(means, r=2): + assert not np.allclose(i_mean, j_mean) + + +@pytest.mark.parametrize( + "init_params", ["random", "random_from_data", "k-means++", "kmeans"] +) +def test_means_for_all_inits(init_params, global_random_seed): + # Check fitted means properties for all initializations + rng = np.random.RandomState(global_random_seed) + rand_data = RandomData(rng, scale=5) + n_components = rand_data.n_components + X = rand_data.X["full"] + + gmm = GaussianMixture( + n_components=n_components, init_params=init_params, random_state=rng + ) + gmm.fit(X) + + assert gmm.means_.shape == (n_components, X.shape[1]) + assert np.all(X.min(axis=0) <= gmm.means_) + assert np.all(gmm.means_ <= X.max(axis=0)) + assert gmm.converged_ + + +def test_max_iter_zero(): + # Check that max_iter=0 returns initialisation as expected + # Pick arbitrary initial means and check equal to max_iter=0 + rng = np.random.RandomState(0) + rand_data = RandomData(rng, scale=5) + n_components = rand_data.n_components + 
+    X = rand_data.X["full"]
+    means_init = [[20, 30], [30, 25]]
+    gmm = GaussianMixture(
+        n_components=n_components,
+        random_state=rng,
+        means_init=means_init,
+        tol=1e-06,
+        max_iter=0,
+    )
+    gmm.fit(X)
+
+    assert_allclose(gmm.means_, means_init)
+
+
+def test_gaussian_mixture_precisions_init_diag():
+    """Check that we properly initialize `precisions_cholesky_` when we manually
+    provide the precision matrix.
+
+    In this regard, we check the consistency between estimating the precision
+    matrix and providing the same precision matrix as initialization. It should
+    lead to the same results with the same number of iterations.
+
+    If the initialization is wrong, the number of iterations will increase.
+
+    Non-regression test for:
+    https://github.com/scikit-learn/scikit-learn/issues/16944
+    """
+    # generate a toy dataset
+    n_samples = 300
+    rng = np.random.RandomState(0)
+    shifted_gaussian = rng.randn(n_samples, 2) + np.array([20, 20])
+    C = np.array([[0.0, -0.7], [3.5, 0.7]])
+    stretched_gaussian = np.dot(rng.randn(n_samples, 2), C)
+    X = np.vstack([shifted_gaussian, stretched_gaussian])
+
+    # common parameters to check the consistency of precision initialization
+    n_components, covariance_type, reg_covar, random_state = 2, "diag", 1e-6, 0
+
+    # execute the manual initialization to compute the precision matrix:
+    # - run KMeans to have an initial guess
+    # - estimate the covariance
+    # - compute the precision matrix from the estimated covariance
+    resp = np.zeros((X.shape[0], n_components))
+    label = (
+        KMeans(n_clusters=n_components, n_init=1, random_state=random_state)
+        .fit(X)
+        .labels_
+    )
+    resp[np.arange(X.shape[0]), label] = 1
+    _, _, covariance = _estimate_gaussian_parameters(
+        X, resp, reg_covar=reg_covar, covariance_type=covariance_type
+    )
+    precisions_init = 1 / covariance
+
+    gm_with_init = GaussianMixture(
+        n_components=n_components,
+        covariance_type=covariance_type,
+        reg_covar=reg_covar,
+        precisions_init=precisions_init,
+        random_state=random_state,
+    ).fit(X)
+
+    gm_without_init = GaussianMixture(
+        n_components=n_components,
+        covariance_type=covariance_type,
+        reg_covar=reg_covar,
+        random_state=random_state,
+    ).fit(X)
+
+    assert gm_without_init.n_iter_ == gm_with_init.n_iter_
+    assert_allclose(
+        gm_with_init.precisions_cholesky_, gm_without_init.precisions_cholesky_
+    )
+
+
+def _generate_data(seed, n_samples, n_features, n_components):
+    """Randomly generate samples and responsibilities."""
+    rs = np.random.RandomState(seed)
+    X = rs.random_sample((n_samples, n_features))
+    resp = rs.random_sample((n_samples, n_components))
+    resp /= resp.sum(axis=1)[:, np.newaxis]
+    return X, resp
+
+
+def _calculate_precisions(X, resp, covariance_type):
+    """Calculate the precision matrix of X and its Cholesky decomposition
+    for the given covariance type.
+    """
+    reg_covar = 1e-6
+    weights, means, covariances = _estimate_gaussian_parameters(
+        X, resp, reg_covar, covariance_type
+    )
+    precisions_cholesky = _compute_precision_cholesky(covariances, covariance_type)
+
+    _, n_components = resp.shape
+    # Instantiate a `GaussianMixture` model in order to use its
+    # `_set_parameters` method to return the `precisions_` and
+    # `precisions_cholesky_` matching the `covariance_type`
+    # provided.
+    gmm = GaussianMixture(n_components=n_components, covariance_type=covariance_type)
+    params = (weights, means, covariances, precisions_cholesky)
+    gmm._set_parameters(params)
+    return gmm.precisions_, gmm.precisions_cholesky_
+
+
+@pytest.mark.parametrize("covariance_type", COVARIANCE_TYPE)
+def test_gaussian_mixture_precisions_init(covariance_type, global_random_seed):
+    """Non-regression test for #26415."""
+
+    X, resp = _generate_data(
+        seed=global_random_seed,
+        n_samples=100,
+        n_features=3,
+        n_components=4,
+    )
+
+    precisions_init, desired_precisions_cholesky = _calculate_precisions(
+        X, resp, covariance_type
+    )
+    gmm = GaussianMixture(
+        covariance_type=covariance_type, precisions_init=precisions_init
+    )
+    gmm._initialize(X, resp)
+    actual_precisions_cholesky = gmm.precisions_cholesky_
+    assert_allclose(actual_precisions_cholesky, desired_precisions_cholesky)
+
+
+def test_gaussian_mixture_single_component_stable():
+    """
+    Non-regression test for #23032 ensuring a 1-component GM works on only a
+    few samples.
+    """
+    rng = np.random.RandomState(0)
+    X = rng.multivariate_normal(np.zeros(2), np.identity(2), size=3)
+    gm = GaussianMixture(n_components=1)
+    gm.fit(X).sample()
+
+
+def test_gaussian_mixture_all_init_does_not_estimate_gaussian_parameters(
+    monkeypatch,
+    global_random_seed,
+):
+    """When all init parameters are provided, the Gaussian parameters
+    are not estimated.
+
+    Non-regression test for gh-26015.
+    """
+
+    mock = Mock(side_effect=_estimate_gaussian_parameters)
+    monkeypatch.setattr(
+        sklearn.mixture._gaussian_mixture, "_estimate_gaussian_parameters", mock
+    )
+
+    rng = np.random.RandomState(global_random_seed)
+    rand_data = RandomData(rng)
+
+    gm = GaussianMixture(
+        n_components=rand_data.n_components,
+        weights_init=rand_data.weights,
+        means_init=rand_data.means,
+        precisions_init=rand_data.precisions["full"],
+        random_state=rng,
+    )
+    gm.fit(rand_data.X["full"])
+    # The initial Gaussian parameters are not estimated; they are estimated once
+    # per M-step.
+    assert mock.call_count == gm.n_iter_
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c98d150f06a8c7685d24c083e2ed2866f17c8ca
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/mixture/tests/test_mixture.py
@@ -0,0 +1,30 @@
+# Authors: The scikit-learn developers
+# SPDX-License-Identifier: BSD-3-Clause
+
+import numpy as np
+import pytest
+
+from sklearn.mixture import BayesianGaussianMixture, GaussianMixture
+
+
+@pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()])
+def test_gaussian_mixture_n_iter(estimator):
+    # check that n_iter_ is the number of iterations performed.
+    rng = np.random.RandomState(0)
+    X = rng.rand(10, 5)
+    max_iter = 1
+    estimator.set_params(max_iter=max_iter)
+    estimator.fit(X)
+    assert estimator.n_iter_ == max_iter
+
+
+@pytest.mark.parametrize("estimator", [GaussianMixture(), BayesianGaussianMixture()])
+def test_mixture_n_components_greater_than_n_samples_error(estimator):
+    """Check that an error is raised when n_components > n_samples."""
+    rng = np.random.RandomState(0)
+    X = rng.rand(10, 5)
+    estimator.set_params(n_components=12)
+
+    msg = "Expected n_samples >= n_components"
+    with pytest.raises(ValueError, match=msg):
+        estimator.fit(X)
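
For orientation, a minimal sketch of the two behaviours asserted by test_mixture.py above. It is not part of the patch; it assumes a standard scikit-learn installation, uses only the public GaussianMixture API, and the expected error message is taken directly from the test:

    import numpy as np
    from sklearn.mixture import GaussianMixture

    rng = np.random.RandomState(0)
    X = rng.rand(10, 5)

    # n_iter_ reports the number of EM iterations actually performed, so capping
    # max_iter caps n_iter_ as well (a ConvergenceWarning may be emitted).
    gm = GaussianMixture(max_iter=1).fit(X)
    assert gm.n_iter_ == 1

    # Asking for more components than samples is rejected at fit time.
    try:
        GaussianMixture(n_components=12).fit(X)
    except ValueError as exc:
        assert "Expected n_samples >= n_components" in str(exc)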
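
Reaching back to the "diag" precision-initialization test in test_gaussian_mixture.py, a small sketch of the relationship it relies on (the array values below are illustrative only, not from the test): for diagonal covariances the precision is the elementwise inverse of the per-feature variances, and the Cholesky factor of that diagonal precision matrix is its elementwise square root, which is why the test can pass precisions_init = 1 / covariance:

    import numpy as np

    # Per-component, per-feature variances for covariance_type="diag"
    # (shape: n_components x n_features; made-up values for illustration).
    covariances = np.array([[4.0, 1.0, 0.25], [9.0, 2.0, 0.5]])

    precisions = 1.0 / covariances             # analogue of precisions_init in the test
    precisions_cholesky = np.sqrt(precisions)  # Cholesky factor of a diagonal matrix

    # The factor squares back to the precision, and the precision inverts the covariance.
    assert np.allclose(precisions_cholesky**2, precisions)
    assert np.allclose(precisions * covariances, 1.0)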