Dataset schema (one record = one code file; the fields appear in this order for every record below):

repo_name : string, 6 to 112 characters
path      : string, 4 to 204 characters
copies    : string, 1 to 3 characters
size      : string, 4 to 6 characters
content   : string, 714 to 810k characters
license   : string, one of 15 license classes
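The records below follow this schema, one field per line. As a minimal, hedged sketch of how such records could be consumed (the file name code_samples.jsonl and the JSON Lines layout are assumptions for illustration only, not part of the dataset), they might be loaded and filtered with pandas:

# Sketch only: assumes a hypothetical local file "code_samples.jsonl" with one
# JSON object per line carrying the fields repo_name, path, copies, size,
# content and license; adjust the path and format to the actual dataset.
import pandas as pd

df = pd.read_json("code_samples.jsonl", lines=True)

# "copies" and "size" are stored as strings in the schema above,
# so cast them before doing numeric comparisons.
df["size"] = df["size"].astype(int)

# Example query: permissively licensed files under 5 kB.
small_bsd = df[(df["license"] == "bsd-3-clause") & (df["size"] < 5000)]
print(small_bsd[["repo_name", "path", "size"]])

The only schema-specific detail to keep in mind is that copies and size arrive as strings and need casting before any numeric filtering.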
repo_name: kwinkunks/geopandas
path: tests/test_overlay.py
copies: 8
size: 3242
content:

from __future__ import absolute_import

import tempfile
import shutil

from shapely.geometry import Point

from geopandas import GeoDataFrame, read_file
from geopandas.tools import overlay
from .util import unittest, download_nybb


class TestDataFrame(unittest.TestCase):

    def setUp(self):
        N = 10

        nybb_filename = download_nybb()

        self.polydf = read_file('/nybb_14a_av/nybb.shp', vfs='zip://' + nybb_filename)
        self.tempdir = tempfile.mkdtemp()
        self.crs = {'init': 'epsg:4326'}
        b = [int(x) for x in self.polydf.total_bounds]
        self.polydf2 = GeoDataFrame([
            {'geometry' : Point(x, y).buffer(10000), 'value1': x + y, 'value2': x - y}
            for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
                            range(b[1], b[3], int((b[3]-b[1])/N)))], crs=self.crs)
        self.pointdf = GeoDataFrame([
            {'geometry' : Point(x, y), 'value1': x + y, 'value2': x - y}
            for x, y in zip(range(b[0], b[2], int((b[2]-b[0])/N)),
                            range(b[1], b[3], int((b[3]-b[1])/N)))], crs=self.crs)

        # TODO this appears to be necessary;
        # why is the sindex not generated automatically?
        self.polydf2._generate_sindex()

        self.union_shape = (180, 7)

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def test_union(self):
        df = overlay(self.polydf, self.polydf2, how="union")
        self.assertTrue(type(df) is GeoDataFrame)
        self.assertEquals(df.shape, self.union_shape)
        self.assertTrue('value1' in df.columns and 'Shape_Area' in df.columns)

    def test_union_no_index(self):
        # explicitly ignore indicies
        dfB = overlay(self.polydf, self.polydf2, how="union", use_sindex=False)
        self.assertEquals(dfB.shape, self.union_shape)

        # remove indicies from df
        self.polydf._sindex = None
        self.polydf2._sindex = None
        dfC = overlay(self.polydf, self.polydf2, how="union")
        self.assertEquals(dfC.shape, self.union_shape)

    def test_intersection(self):
        df = overlay(self.polydf, self.polydf2, how="intersection")
        self.assertIsNotNone(df['BoroName'][0])
        self.assertEquals(df.shape, (68, 7))

    def test_identity(self):
        df = overlay(self.polydf, self.polydf2, how="identity")
        self.assertEquals(df.shape, (154, 7))

    def test_symmetric_difference(self):
        df = overlay(self.polydf, self.polydf2, how="symmetric_difference")
        self.assertEquals(df.shape, (122, 7))

    def test_difference(self):
        df = overlay(self.polydf, self.polydf2, how="difference")
        self.assertEquals(df.shape, (86, 7))

    def test_bad_how(self):
        self.assertRaises(ValueError,
                          overlay, self.polydf, self.polydf, how="spandex")

    def test_nonpoly(self):
        self.assertRaises(TypeError,
                          overlay, self.pointdf, self.polydf, how="union")

    def test_duplicate_column_name(self):
        polydf2r = self.polydf2.rename(columns={'value2': 'Shape_Area'})
        df = overlay(self.polydf, polydf2r, how="union")
        self.assertTrue('Shape_Area_2' in df.columns and 'Shape_Area' in df.columns)

license: bsd-3-clause
repo_name: shaypal5/cachier
path: setup.py
copies: 1
size: 2249
content:

"""Setup file for the Cachier package."""

# This file is part of Cachier.
# https://github.com/shaypal5/cachier

# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Shay Palachy <[email protected]>

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

import versioneer


TEST_REQUIRES = [
    # tests and coverages
    'pytest', 'coverage', 'pytest-cov',
    # for reading configfutation
    'birch',
    # to connect to the test mongodb server
    'pymongo', 'dnspython',
    # to test pandas dataframe as-param hashing with mongodb core
    'pandas',
]

README_RST = ''
with open('README.rst') as f:
    README_RST = f.read()

setup(
    name='cachier',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description=('Persistent, stale-free, local and cross-machine caching for'
                 ' Python functions.'),
    long_description=README_RST,
    license='MIT',
    author='Shay Palachy',
    author_email='[email protected]',
    url='https://github.com/shaypal5/cachier',
    packages=['cachier'],
    entry_points='''
        [console_scripts]
        cachier=cachier.scripts.cli:cli
    ''',
    install_requires=[
        'watchdog', 'portalocker',
        'pathtools',  # for watchdog, who has dependency spec problem
    ],
    extras_require={
        'test': TEST_REQUIRES,
    },
    platforms=['linux', 'osx', 'windows'],
    keywords=['cache', 'persistence', 'mongo', 'memoization', 'decorator'],
    classifiers=[
        # Trove classifiers
        # (https://pypi.python.org/pypi?%3Aaction=list_classifiers)
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
        'Topic :: Other/Nonlisted Topic',
        'Intended Audience :: Developers',
    ],
)

license: mit
repo_name: florian-f/sklearn
path: examples/decomposition/plot_ica_blind_source_separation.py
copies: 4
size: 1512
content:

"""
=====================================
Blind source separation using FastICA
=====================================

:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 2 instruments playing simultaneously and 2 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument.

"""
print(__doc__)

import numpy as np
import pylab as pl
from sklearn.decomposition import FastICA

###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 10, n_samples)
s1 = np.sin(2 * time)  # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time))  # Signal 2 : square signal

S = np.c_[s1, s2]
S += 0.2 * np.random.normal(size=S.shape)  # Add noise

S /= S.std(axis=0)  # Standardize data
# Mix data
A = np.array([[1, 1], [0.5, 2]])  # Mixing matrix
X = np.dot(S, A.T)  # Generate observations

# Compute ICA
ica = FastICA()
S_ = ica.fit(X).transform(X)  # Get the estimated sources
A_ = ica.get_mixing_matrix()  # Get estimated mixing matrix
assert np.allclose(X, np.dot(S_, A_.T))

###############################################################################
# Plot results
pl.figure()
pl.subplot(3, 1, 1)
pl.plot(S)
pl.title('True Sources')
pl.subplot(3, 1, 2)
pl.plot(X)
pl.title('Observations (mixed signal)')
pl.subplot(3, 1, 3)
pl.plot(S_)
pl.title('ICA estimated sources')
pl.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
pl.show()

license: bsd-3-clause
repo_name: waterponey/scikit-learn
path: examples/linear_model/plot_iris_logistic.py
copies: 119
size: 1679
content:

#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================

Show below is a logistic-regression classifiers decision boundaries on the
`iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.

"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

h = .02  # step size in the mesh

logreg = linear_model.LogisticRegression(C=1e5)

# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())

plt.show()

license: bsd-3-clause
repo_name: altairpearl/scikit-learn
path: examples/cluster/plot_segmentation_toy.py
copies: 91
size: 3522
content:

"""
===========================================
Spectral clustering for image segmentation
===========================================

In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.

In these settings, the :ref:`spectral_clustering` approach solves the problem
know as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.

As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.

In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.

In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)

# Authors:  Emmanuelle Gouillart <[email protected]>
#           Gael Varoquaux <[email protected]>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering

###############################################################################
l = 100
x, y = np.indices((l, l))

center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)

radius1, radius2, radius3, radius4 = 16, 14, 15, 14

circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2

###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4

# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)

img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)

# Take a decreasing function of the gradient: we take it weakly
# dependent from the gradient the segmentation is close to a voronoi
graph.data = np.exp(-graph.data / graph.data.std())

# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels

plt.matshow(img)
plt.matshow(label_im)

###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)

graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())

labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels

plt.matshow(img)
plt.matshow(label_im)

plt.show()

license: bsd-3-clause
repo_name: owaiskhan/Retransmission-Combining
path: gr-digital/examples/example_fll.py
copies: 17
size: 4821
content:

#!/usr/bin/env python

from gnuradio import gr, digital
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser

try:
    import scipy
except ImportError:
    print "Error: could not import scipy (http://www.scipy.org/)"
    sys.exit(1)

try:
    import pylab
except ImportError:
    print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
    sys.exit(1)

class example_fll(gr.top_block):
    def __init__(self, N, sps, rolloff, ntaps, bw, noise,
                 foffset, toffset, poffset):
        gr.top_block.__init__(self)

        rrc_taps = gr.firdes.root_raised_cosine(
            sps, sps, 1.0, rolloff, ntaps)

        data = 2.0*scipy.random.randint(0, 2, N) - 1.0
        data = scipy.exp(1j*poffset) * data

        self.src = gr.vector_source_c(data.tolist(), False)
        self.rrc = gr.interp_fir_filter_ccf(sps, rrc_taps)
        self.chn = gr.channel_model(noise, foffset, toffset)
        self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)

        self.vsnk_src = gr.vector_sink_c()
        self.vsnk_fll = gr.vector_sink_c()
        self.vsnk_frq = gr.vector_sink_f()
        self.vsnk_phs = gr.vector_sink_f()
        self.vsnk_err = gr.vector_sink_f()

        self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
        self.connect(self.rrc, self.vsnk_src)
        self.connect((self.fll,1), self.vsnk_frq)
        self.connect((self.fll,2), self.vsnk_phs)
        self.connect((self.fll,3), self.vsnk_err)

def main():
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=2000,
                      help="Set the number of samples to process [default=%default]")
    parser.add_option("-S", "--sps", type="int", default=4,
                      help="Set the samples per symbol [default=%default]")
    parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
                      help="Set the rolloff factor [default=%default]")
    parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
                      help="Set the loop bandwidth [default=%default]")
    parser.add_option("-n", "--ntaps", type="int", default=45,
                      help="Set the number of taps in the filters [default=%default]")
    parser.add_option("", "--noise", type="eng_float", default=0.0,
                      help="Set the simulation noise voltage [default=%default]")
    parser.add_option("-f", "--foffset", type="eng_float", default=0.2,
                      help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
    parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
                      help="Set the simulation's timing offset [default=%default]")
    parser.add_option("-p", "--poffset", type="eng_float", default=0.0,
                      help="Set the simulation's phase offset [default=%default]")
    (options, args) = parser.parse_args ()

    # Adjust N for the interpolation by sps
    options.nsamples = options.nsamples // options.sps

    # Set up the program-under-test
    put = example_fll(options.nsamples, options.sps, options.rolloff,
                      options.ntaps, options.bandwidth, options.noise,
                      options.foffset, options.toffset, options.poffset)
    put.run()

    data_src = scipy.array(put.vsnk_src.data())
    data_err = scipy.array(put.vsnk_err.data())

    # Convert the FLL's LO frequency from rads/sec to Hz
    data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)

    # adjust this to align with the data. There are 2 filters of
    # ntaps long and the channel introduces another 4 sample delay.
    data_fll = scipy.array(put.vsnk_fll.data()[2*options.ntaps-4:])

    # Plot the FLL's LO frequency
    f1 = pylab.figure(1, figsize=(12,10))
    s1 = f1.add_subplot(2,2,1)
    s1.plot(data_frq)
    s1.set_title("FLL LO")
    s1.set_xlabel("Samples")
    s1.set_ylabel("Frequency (normalized Hz)")

    # Plot the FLL's error
    s2 = f1.add_subplot(2,2,2)
    s2.plot(data_err)
    s2.set_title("FLL Error")
    s2.set_xlabel("Samples")
    s2.set_ylabel("FLL Loop error")

    # Plot the IQ symbols
    s3 = f1.add_subplot(2,2,3)
    s3.plot(data_src.real, data_src.imag, "o")
    s3.plot(data_fll.real, data_fll.imag, "rx")
    s3.set_title("IQ")
    s3.set_xlabel("Real part")
    s3.set_ylabel("Imag part")

    # Plot the symbols in time
    s4 = f1.add_subplot(2,2,4)
    s4.plot(data_src.real, "o-")
    s4.plot(data_fll.real, "rx-")
    s4.set_title("Symbols")
    s4.set_xlabel("Samples")
    s4.set_ylabel("Real Part of Signals")

    pylab.show()

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass

license: gpl-3.0
repo_name: jayflo/scikit-learn
path: examples/gaussian_process/gp_diabetes_dataset.py
copies: 223
size: 1976
content:

#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================

In this example, we fit a Gaussian Process model onto the diabetes
dataset.

We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.

We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)

# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause

from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold

# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# Instanciate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
                     theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
                     thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')

# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)

# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_  # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None  # None bounds deactivate MLE

# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20  # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
      % (K, R2))

license: bsd-3-clause
repo_name: wwf5067/statsmodels
path: statsmodels/tsa/filters/filtertools.py
copies: 25
size: 12438
content:
# -*- coding: utf-8 -*- """Linear Filters for time series analysis and testing TODO: * check common sequence in signature of filter functions (ar,ma,x) or (x,ar,ma) Created on Sat Oct 23 17:18:03 2010 Author: Josef-pktd """ #not original copied from various experimental scripts #version control history is there from statsmodels.compat.python import range import numpy as np import scipy.fftpack as fft from scipy import signal from scipy.signal.signaltools import _centered as trim_centered from ._utils import _maybe_get_pandas_wrapper def _pad_nans(x, head=None, tail=None): if np.ndim(x) == 1: if head is None and tail is None: return x elif head and tail: return np.r_[[np.nan] * head, x, [np.nan] * tail] elif tail is None: return np.r_[[np.nan] * head, x] elif head is None: return np.r_[x, [np.nan] * tail] elif np.ndim(x) == 2: if head is None and tail is None: return x elif head and tail: return np.r_[[[np.nan] * x.shape[1]] * head, x, [[np.nan] * x.shape[1]] * tail] elif tail is None: return np.r_[[[np.nan] * x.shape[1]] * head, x] elif head is None: return np.r_[x, [[np.nan] * x.shape[1]] * tail] else: raise ValueError("Nan-padding for ndim > 2 not implemented") #original changes and examples in sandbox.tsa.try_var_convolve # don't do these imports, here just for copied fftconvolve #get rid of these imports #from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \ # ifftn, fftfreq #from numpy import product,array def fftconvolveinv(in1, in2, mode="full"): """Convolve two N-dimensional arrays using FFT. See convolve. copied from scipy.signal.signaltools, but here used to try out inverse filter doesn't work or I can't get it to work 2010-10-23: looks ok to me for 1d, from results below with padded data array (fftp) but it doesn't work for multidimensional inverse filter (fftn) original signal.fftconvolve also uses fftn """ s1 = np.array(in1.shape) s2 = np.array(in2.shape) complex_result = (np.issubdtype(in1.dtype, np.complex) or np.issubdtype(in2.dtype, np.complex)) size = s1+s2-1 # Always use 2**n-sized FFT fsize = 2**np.ceil(np.log2(size)) IN1 = fft.fftn(in1,fsize) #IN1 *= fftn(in2,fsize) #JP: this looks like the only change I made IN1 /= fft.fftn(in2,fsize) # use inverse filter # note the inverse is elementwise not matrix inverse # is this correct, NO doesn't seem to work for VARMA fslice = tuple([slice(0, int(sz)) for sz in size]) ret = fft.ifftn(IN1)[fslice].copy() del IN1 if not complex_result: ret = ret.real if mode == "full": return ret elif mode == "same": if np.product(s1,axis=0) > np.product(s2,axis=0): osize = s1 else: osize = s2 return trim_centered(ret,osize) elif mode == "valid": return trim_centered(ret,abs(s2-s1)+1) #code duplication with fftconvolveinv def fftconvolve3(in1, in2=None, in3=None, mode="full"): """Convolve two N-dimensional arrays using FFT. See convolve. 
for use with arma (old version: in1=num in2=den in3=data * better for consistency with other functions in1=data in2=num in3=den * note in2 and in3 need to have consistent dimension/shape since I'm using max of in2, in3 shapes and not the sum copied from scipy.signal.signaltools, but here used to try out inverse filter doesn't work or I can't get it to work 2010-10-23 looks ok to me for 1d, from results below with padded data array (fftp) but it doesn't work for multidimensional inverse filter (fftn) original signal.fftconvolve also uses fftn """ if (in2 is None) and (in3 is None): raise ValueError('at least one of in2 and in3 needs to be given') s1 = np.array(in1.shape) if not in2 is None: s2 = np.array(in2.shape) else: s2 = 0 if not in3 is None: s3 = np.array(in3.shape) s2 = max(s2, s3) # try this looks reasonable for ARMA #s2 = s3 complex_result = (np.issubdtype(in1.dtype, np.complex) or np.issubdtype(in2.dtype, np.complex)) size = s1+s2-1 # Always use 2**n-sized FFT fsize = 2**np.ceil(np.log2(size)) #convolve shorter ones first, not sure if it matters if not in2 is None: IN1 = fft.fftn(in2, fsize) if not in3 is None: IN1 /= fft.fftn(in3, fsize) # use inverse filter # note the inverse is elementwise not matrix inverse # is this correct, NO doesn't seem to work for VARMA IN1 *= fft.fftn(in1, fsize) fslice = tuple([slice(0, int(sz)) for sz in size]) ret = fft.ifftn(IN1)[fslice].copy() del IN1 if not complex_result: ret = ret.real if mode == "full": return ret elif mode == "same": if np.product(s1,axis=0) > np.product(s2,axis=0): osize = s1 else: osize = s2 return trim_centered(ret,osize) elif mode == "valid": return trim_centered(ret,abs(s2-s1)+1) #original changes and examples in sandbox.tsa.try_var_convolve #examples and tests are there def recursive_filter(x, ar_coeff, init=None): ''' Autoregressive, or recursive, filtering. Parameters ---------- x : array-like Time-series data. Should be 1d or n x 1. ar_coeff : array-like AR coefficients in reverse time order. See Notes init : array-like Initial values of the time-series prior to the first value of y. The default is zero. Returns ------- y : array Filtered array, number of columns determined by x and ar_coeff. If a pandas object is given, a pandas object is returned. Notes ----- Computes the recursive filter :: y[n] = ar_coeff[0] * y[n-1] + ... + ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n] where n_coeff = len(n_coeff). ''' _pandas_wrapper = _maybe_get_pandas_wrapper(x) x = np.asarray(x).squeeze() ar_coeff = np.asarray(ar_coeff).squeeze() if x.ndim > 1 or ar_coeff.ndim > 1: raise ValueError('x and ar_coeff have to be 1d') if init is not None: # integer init are treated differently in lfiltic if len(init) != len(ar_coeff): raise ValueError("ar_coeff must be the same length as init") init = np.asarray(init, dtype=float) if init is not None: zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x) else: zi = None y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi) if init is not None: result = y[0] else: result = y if _pandas_wrapper: return _pandas_wrapper(result) return result def convolution_filter(x, filt, nsides=2): ''' Linear filtering via convolution. Centered and backward displaced moving weighted average. Parameters ---------- x : array_like data array, 1d or 2d, if 2d then observations in rows filt : array_like Linear filter coefficients in reverse time-order. Should have the same number of dimensions as x though if 1d and ``x`` is 2d will be coerced to 2d. 
nsides : int, optional If 2, a centered moving average is computed using the filter coefficients. If 1, the filter coefficients are for past values only. Both methods use scipy.signal.convolve. Returns ------- y : ndarray, 2d Filtered array, number of columns determined by x and filt. If a pandas object is given, a pandas object is returned. The index of the return is the exact same as the time period in ``x`` Notes ----- In nsides == 1, x is filtered :: y[n] = filt[0]*x[n-1] + ... + filt[n_filt-1]*x[n-n_filt] where n_filt is len(filt). If nsides == 2, x is filtered around lag 0 :: y[n] = filt[0]*x[n - n_filt/2] + ... + filt[n_filt / 2] * x[n] + ... + x[n + n_filt/2] where n_filt is len(filt). If n_filt is even, then more of the filter is forward in time than backward. If filt is 1d or (nlags,1) one lag polynomial is applied to all variables (columns of x). If filt is 2d, (nlags, nvars) each series is independently filtered with its own lag polynomial, uses loop over nvar. This is different than the usual 2d vs 2d convolution. Filtering is done with scipy.signal.convolve, so it will be reasonably fast for medium sized data. For large data fft convolution would be faster. ''' # for nsides shift the index instead of using 0 for 0 lag this # allows correct handling of NaNs if nsides == 1: trim_head = len(filt) - 1 trim_tail = None elif nsides == 2: trim_head = int(np.ceil(len(filt)/2.) - 1) or None trim_tail = int(np.ceil(len(filt)/2.) - len(filt) % 2) or None else: # pragma : no cover raise ValueError("nsides must be 1 or 2") _pandas_wrapper = _maybe_get_pandas_wrapper(x) x = np.asarray(x) filt = np.asarray(filt) if x.ndim > 1 and filt.ndim == 1: filt = filt[:, None] if x.ndim > 2: raise ValueError('x array has to be 1d or 2d') if filt.ndim == 1 or min(filt.shape) == 1: result = signal.convolve(x, filt, mode='valid') elif filt.ndim == 2: nlags = filt.shape[0] nvar = x.shape[1] result = np.zeros((x.shape[0] - nlags + 1, nvar)) if nsides == 2: for i in range(nvar): # could also use np.convolve, but easier for swiching to fft result[:, i] = signal.convolve(x[:, i], filt[:, i], mode='valid') elif nsides == 1: for i in range(nvar): result[:, i] = signal.convolve(x[:, i], np.r_[0, filt[:, i]], mode='valid') result = _pad_nans(result, trim_head, trim_tail) if _pandas_wrapper: return _pandas_wrapper(result) return result #copied from sandbox.tsa.garch def miso_lfilter(ar, ma, x, useic=False): #[0.1,0.1]): ''' use nd convolution to merge inputs, then use lfilter to produce output arguments for column variables return currently 1d Parameters ---------- ar : array_like, 1d, float autoregressive lag polynomial including lag zero, ar(L)y_t ma : array_like, same ndim as x, currently 2d moving average lag polynomial ma(L)x_t x : array_like, 2d input data series, time in rows, variables in columns Returns ------- y : array, 1d filtered output series inp : array, 1d combined input series Notes ----- currently for 2d inputs only, no choice of axis Use of signal.lfilter requires that ar lag polynomial contains floating point numbers does not cut off invalid starting and final values miso_lfilter find array y such that:: ar(L)y_t = ma(L)x_t with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars) ''' ma = np.asarray(ma) ar = np.asarray(ar) #inp = signal.convolve(x, ma, mode='valid') #inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2] #Note: convolve mixes up the variable left-right flip #I only want the flip in time direction #this might also be a mistake or problem in other code where I 
#switched from correlate to convolve # correct convolve version, for use with fftconvolve in other cases #inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2] inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2] #for testing 2d equivalence between convolve and correlate #np.testing.assert_almost_equal(inp2, inp) nobs = x.shape[0] # cut of extra values at end #todo initialize also x for correlate if useic: return signal.lfilter([1], ar, inp, #zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs] zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs] else: return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs] #return signal.lfilter([1], ar, inp), inp
license: bsd-3-clause
repo_name: anntzer/scikit-learn
path: sklearn/tests/test_pipeline.py
copies: 6
size: 45406
content:
""" Test the pipeline module. """ from tempfile import mkdtemp import shutil import time import re import itertools import pytest import numpy as np from scipy import sparse import joblib from sklearn.utils.fixes import parse_version from sklearn.utils._testing import ( assert_raises, assert_raises_regex, assert_raise_message, assert_allclose, assert_array_equal, assert_array_almost_equal, assert_no_warnings, MinimalClassifier, MinimalRegressor, MinimalTransformer, ) from sklearn.base import clone, is_classifier, BaseEstimator, TransformerMixin from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union from sklearn.svm import SVC from sklearn.neighbors import LocalOutlierFactor from sklearn.linear_model import LogisticRegression, Lasso from sklearn.linear_model import LinearRegression from sklearn.metrics import accuracy_score, r2_score from sklearn.cluster import KMeans from sklearn.feature_selection import SelectKBest, f_classif from sklearn.dummy import DummyRegressor from sklearn.decomposition import PCA, TruncatedSVD from sklearn.datasets import load_iris from sklearn.preprocessing import StandardScaler from sklearn.feature_extraction.text import CountVectorizer from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.impute import SimpleImputer iris = load_iris() JUNK_FOOD_DOCS = ( "the pizza pizza beer copyright", "the pizza burger beer copyright", "the the pizza beer beer copyright", "the burger beer beer copyright", "the coke burger coke copyright", "the coke burger burger", ) class NoFit: """Small class to test parameter dispatching. """ def __init__(self, a=None, b=None): self.a = a self.b = b class NoTrans(NoFit): def fit(self, X, y): return self def get_params(self, deep=False): return {'a': self.a, 'b': self.b} def set_params(self, **params): self.a = params['a'] return self class NoInvTransf(NoTrans): def transform(self, X): return X class Transf(NoInvTransf): def transform(self, X): return X def inverse_transform(self, X): return X class TransfFitParams(Transf): def fit(self, X, y, **fit_params): self.fit_params = fit_params return self class Mult(BaseEstimator): def __init__(self, mult=1): self.mult = mult def fit(self, X, y): return self def transform(self, X): return np.asarray(X) * self.mult def inverse_transform(self, X): return np.asarray(X) / self.mult def predict(self, X): return (np.asarray(X) * self.mult).sum(axis=1) predict_proba = predict_log_proba = decision_function = predict def score(self, X, y=None): return np.sum(X) class FitParamT(BaseEstimator): """Mock classifier """ def __init__(self): self.successful = False def fit(self, X, y, should_succeed=False): self.successful = should_succeed def predict(self, X): return self.successful def fit_predict(self, X, y, should_succeed=False): self.fit(X, y, should_succeed=should_succeed) return self.predict(X) def score(self, X, y=None, sample_weight=None): if sample_weight is not None: X = X * sample_weight return np.sum(X) class DummyTransf(Transf): """Transformer which store the column means""" def fit(self, X, y): self.means_ = np.mean(X, axis=0) # store timestamp to figure out whether the result of 'fit' has been # cached or not self.timestamp_ = time.time() return self class DummyEstimatorParams(BaseEstimator): """Mock classifier that takes params on predict""" def fit(self, X, y): return self def predict(self, X, got_attribute=False): self.got_attribute = got_attribute return self def test_pipeline_init(): 
# Test the various init parameters of the pipeline. assert_raises(TypeError, Pipeline) # Check that we can't instantiate pipelines with objects without fit # method assert_raises_regex(TypeError, 'Last step of Pipeline should implement fit ' 'or be the string \'passthrough\'' '.*NoFit.*', Pipeline, [('clf', NoFit())]) # Smoke test with only an estimator clf = NoTrans() pipe = Pipeline([('svc', clf)]) assert (pipe.get_params(deep=True) == dict(svc__a=None, svc__b=None, svc=clf, **pipe.get_params(deep=False))) # Check that params are set pipe.set_params(svc__a=0.1) assert clf.a == 0.1 assert clf.b is None # Smoke test the repr: repr(pipe) # Test with two objects clf = SVC() filter1 = SelectKBest(f_classif) pipe = Pipeline([('anova', filter1), ('svc', clf)]) # Check that estimators are not cloned on pipeline construction assert pipe.named_steps['anova'] is filter1 assert pipe.named_steps['svc'] is clf # Check that we can't instantiate with non-transformers on the way # Note that NoTrans implements fit, but not transform assert_raises_regex(TypeError, 'All intermediate steps should be transformers' '.*\\bNoTrans\\b.*', Pipeline, [('t', NoTrans()), ('svc', clf)]) # Check that params are set pipe.set_params(svc__C=0.1) assert clf.C == 0.1 # Smoke test the repr: repr(pipe) # Check that params are not set when naming them wrong assert_raises(ValueError, pipe.set_params, anova__C=0.1) # Test clone pipe2 = assert_no_warnings(clone, pipe) assert not pipe.named_steps['svc'] is pipe2.named_steps['svc'] # Check that apart from estimators, the parameters are the same params = pipe.get_params(deep=True) params2 = pipe2.get_params(deep=True) for x in pipe.get_params(deep=False): params.pop(x) for x in pipe2.get_params(deep=False): params2.pop(x) # Remove estimators that where copied params.pop('svc') params.pop('anova') params2.pop('svc') params2.pop('anova') assert params == params2 def test_pipeline_init_tuple(): # Pipeline accepts steps as tuple X = np.array([[1, 2]]) pipe = Pipeline((('transf', Transf()), ('clf', FitParamT()))) pipe.fit(X, y=None) pipe.score(X) pipe.set_params(transf='passthrough') pipe.fit(X, y=None) pipe.score(X) def test_pipeline_methods_anova(): # Test the various methods of the pipeline (anova). 
X = iris.data y = iris.target # Test with Anova + LogisticRegression clf = LogisticRegression() filter1 = SelectKBest(f_classif, k=2) pipe = Pipeline([('anova', filter1), ('logistic', clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_fit_params(): # Test that the pipeline can take fit parameters pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())]) pipe.fit(X=None, y=None, clf__should_succeed=True) # classifier should return True assert pipe.predict(None) # and transformer params should not be changed assert pipe.named_steps['transf'].a is None assert pipe.named_steps['transf'].b is None # invalid parameters should raise an error message assert_raise_message( TypeError, "fit() got an unexpected keyword argument 'bad'", pipe.fit, None, None, clf__bad=True ) def test_pipeline_sample_weight_supported(): # Pipeline should pass sample_weight X = np.array([[1, 2]]) pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())]) pipe.fit(X, y=None) assert pipe.score(X) == 3 assert pipe.score(X, y=None) == 3 assert pipe.score(X, y=None, sample_weight=None) == 3 assert pipe.score(X, sample_weight=np.array([2, 3])) == 8 def test_pipeline_sample_weight_unsupported(): # When sample_weight is None it shouldn't be passed X = np.array([[1, 2]]) pipe = Pipeline([('transf', Transf()), ('clf', Mult())]) pipe.fit(X, y=None) assert pipe.score(X) == 3 assert pipe.score(X, sample_weight=None) == 3 assert_raise_message( TypeError, "score() got an unexpected keyword argument 'sample_weight'", pipe.score, X, sample_weight=np.array([2, 3]) ) def test_pipeline_raise_set_params_error(): # Test pipeline raises set params error message for nested models. pipe = Pipeline([('cls', LinearRegression())]) # expected error message error_msg = ('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.') assert_raise_message(ValueError, error_msg % ('fake', pipe), pipe.set_params, fake='nope') # nested model check assert_raise_message(ValueError, error_msg % ("fake", pipe), pipe.set_params, fake__estimator='nope') def test_pipeline_methods_pca_svm(): # Test the various methods of the pipeline (pca + svm). X = iris.data y = iris.target # Test with PCA + SVC clf = SVC(probability=True, random_state=0) pca = PCA(svd_solver='full', n_components='mle', whiten=True) pipe = Pipeline([('pca', pca), ('svc', clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_score_samples_pca_lof(): X = iris.data # Test that the score_samples method is implemented on a pipeline. # Test that the score_samples method on pipeline yields same results as # applying transform and score_samples steps separately. pca = PCA(svd_solver='full', n_components='mle', whiten=True) lof = LocalOutlierFactor(novelty=True) pipe = Pipeline([('pca', pca), ('lof', lof)]) pipe.fit(X) # Check the shapes assert pipe.score_samples(X).shape == (X.shape[0],) # Check the values lof.fit(pca.fit_transform(X)) assert_allclose(pipe.score_samples(X), lof.score_samples(pca.transform(X))) def test_score_samples_on_pipeline_without_score_samples(): X = np.array([[1], [2]]) y = np.array([1, 2]) # Test that a pipeline does not have score_samples method when the final # step of the pipeline does not have score_samples defined. 
pipe = make_pipeline(LogisticRegression()) pipe.fit(X, y) with pytest.raises(AttributeError, match="'LogisticRegression' object has no attribute " "'score_samples'"): pipe.score_samples(X) def test_pipeline_methods_preprocessing_svm(): # Test the various methods of the pipeline (preprocessing + svm). X = iris.data y = iris.target n_samples = X.shape[0] n_classes = len(np.unique(y)) scaler = StandardScaler() pca = PCA(n_components=2, svd_solver='randomized', whiten=True) clf = SVC(probability=True, random_state=0, decision_function_shape='ovr') for preprocessing in [scaler, pca]: pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)]) pipe.fit(X, y) # check shapes of various prediction functions predict = pipe.predict(X) assert predict.shape == (n_samples,) proba = pipe.predict_proba(X) assert proba.shape == (n_samples, n_classes) log_proba = pipe.predict_log_proba(X) assert log_proba.shape == (n_samples, n_classes) decision_function = pipe.decision_function(X) assert decision_function.shape == (n_samples, n_classes) pipe.score(X, y) def test_fit_predict_on_pipeline(): # test that the fit_predict method is implemented on a pipeline # test that the fit_predict on pipeline yields same results as applying # transform and clustering steps separately scaler = StandardScaler() km = KMeans(random_state=0) # As pipeline doesn't clone estimators on construction, # it must have its own estimators scaler_for_pipeline = StandardScaler() km_for_pipeline = KMeans(random_state=0) # first compute the transform and clustering step separately scaled = scaler.fit_transform(iris.data) separate_pred = km.fit_predict(scaled) # use a pipeline to do the transform and clustering in one step pipe = Pipeline([ ('scaler', scaler_for_pipeline), ('Kmeans', km_for_pipeline) ]) pipeline_pred = pipe.fit_predict(iris.data) assert_array_almost_equal(pipeline_pred, separate_pred) def test_fit_predict_on_pipeline_without_fit_predict(): # tests that a pipeline does not have fit_predict method when final # step of pipeline does not have fit_predict defined scaler = StandardScaler() pca = PCA(svd_solver='full') pipe = Pipeline([('scaler', scaler), ('pca', pca)]) assert_raises_regex(AttributeError, "'PCA' object has no attribute 'fit_predict'", getattr, pipe, 'fit_predict') def test_fit_predict_with_intermediate_fit_params(): # tests that Pipeline passes fit_params to intermediate steps # when fit_predict is invoked pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())]) pipe.fit_predict(X=None, y=None, transf__should_get_this=True, clf__should_succeed=True) assert pipe.named_steps['transf'].fit_params['should_get_this'] assert pipe.named_steps['clf'].successful assert 'should_succeed' not in pipe.named_steps['transf'].fit_params def test_predict_with_predict_params(): # tests that Pipeline passes predict_params to the final estimator # when predict is invoked pipe = Pipeline([('transf', Transf()), ('clf', DummyEstimatorParams())]) pipe.fit(None, None) pipe.predict(X=None, got_attribute=True) assert pipe.named_steps['clf'].got_attribute def test_feature_union(): # basic sanity check for feature union X = iris.data X -= X.mean(axis=0) y = iris.target svd = TruncatedSVD(n_components=2, random_state=0) select = SelectKBest(k=1) fs = FeatureUnion([("svd", svd), ("select", select)]) fs.fit(X, y) X_transformed = fs.transform(X) assert X_transformed.shape == (X.shape[0], 3) # check if it does the expected thing assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X)) assert_array_equal(X_transformed[:, 
-1], select.fit_transform(X, y).ravel()) # test if it also works for sparse input # We use a different svd object to control the random_state stream fs = FeatureUnion([("svd", svd), ("select", select)]) X_sp = sparse.csr_matrix(X) X_sp_transformed = fs.fit_transform(X_sp, y) assert_array_almost_equal(X_transformed, X_sp_transformed.toarray()) # Test clone fs2 = assert_no_warnings(clone, fs) assert fs.transformer_list[0][1] is not fs2.transformer_list[0][1] # test setting parameters fs.set_params(select__k=2) assert fs.fit_transform(X, y).shape == (X.shape[0], 4) # test it works with transformers missing fit_transform fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)]) X_transformed = fs.fit_transform(X, y) assert X_transformed.shape == (X.shape[0], 8) # test error if some elements do not support transform assert_raises_regex(TypeError, 'All estimators should implement fit and ' 'transform.*\\bNoTrans\\b', FeatureUnion, [("transform", Transf()), ("no_transform", NoTrans())]) # test that init accepts tuples fs = FeatureUnion((("svd", svd), ("select", select))) fs.fit(X, y) def test_make_union(): pca = PCA(svd_solver='full') mock = Transf() fu = make_union(pca, mock) names, transformers = zip(*fu.transformer_list) assert names == ("pca", "transf") assert transformers == (pca, mock) def test_make_union_kwargs(): pca = PCA(svd_solver='full') mock = Transf() fu = make_union(pca, mock, n_jobs=3) assert fu.transformer_list == make_union(pca, mock).transformer_list assert 3 == fu.n_jobs # invalid keyword parameters should raise an error message assert_raise_message( TypeError, "make_union() got an unexpected " "keyword argument 'transformer_weights'", make_union, pca, mock, transformer_weights={'pca': 10, 'Transf': 1} ) def test_pipeline_transform(): # Test whether pipeline works with a transformer at the end. 
# Also test pipeline.transform and pipeline.inverse_transform X = iris.data pca = PCA(n_components=2, svd_solver='full') pipeline = Pipeline([('pca', pca)]) # test transform and fit_transform: X_trans = pipeline.fit(X).transform(X) X_trans2 = pipeline.fit_transform(X) X_trans3 = pca.fit_transform(X) assert_array_almost_equal(X_trans, X_trans2) assert_array_almost_equal(X_trans, X_trans3) X_back = pipeline.inverse_transform(X_trans) X_back2 = pca.inverse_transform(X_trans) assert_array_almost_equal(X_back, X_back2) def test_pipeline_fit_transform(): # Test whether pipeline works with a transformer missing fit_transform X = iris.data y = iris.target transf = Transf() pipeline = Pipeline([('mock', transf)]) # test fit_transform: X_trans = pipeline.fit_transform(X, y) X_trans2 = transf.fit(X, y).transform(X) assert_array_almost_equal(X_trans, X_trans2) @pytest.mark.parametrize("start, end", [(0, 1), (0, 2), (1, 2), (1, 3), (None, 1), (1, None), (None, None)]) def test_pipeline_slice(start, end): pipe = Pipeline( [("transf1", Transf()), ("transf2", Transf()), ("clf", FitParamT())], memory="123", verbose=True, ) pipe_slice = pipe[start:end] # Test class assert isinstance(pipe_slice, Pipeline) # Test steps assert pipe_slice.steps == pipe.steps[start:end] # Test named_steps attribute assert list(pipe_slice.named_steps.items()) == list( pipe.named_steps.items())[start:end] # Test the rest of the parameters pipe_params = pipe.get_params(deep=False) pipe_slice_params = pipe_slice.get_params(deep=False) del pipe_params["steps"] del pipe_slice_params["steps"] assert pipe_params == pipe_slice_params # Test exception msg = "Pipeline slicing only supports a step of 1" with pytest.raises(ValueError, match=msg): pipe[start:end:-1] def test_pipeline_index(): transf = Transf() clf = FitParamT() pipe = Pipeline([('transf', transf), ('clf', clf)]) assert pipe[0] == transf assert pipe['transf'] == transf assert pipe[-1] == clf assert pipe['clf'] == clf assert_raises(IndexError, lambda: pipe[3]) assert_raises(KeyError, lambda: pipe['foobar']) def test_set_pipeline_steps(): transf1 = Transf() transf2 = Transf() pipeline = Pipeline([('mock', transf1)]) assert pipeline.named_steps['mock'] is transf1 # Directly setting attr pipeline.steps = [('mock2', transf2)] assert 'mock' not in pipeline.named_steps assert pipeline.named_steps['mock2'] is transf2 assert [('mock2', transf2)] == pipeline.steps # Using set_params pipeline.set_params(steps=[('mock', transf1)]) assert [('mock', transf1)] == pipeline.steps # Using set_params to replace single step pipeline.set_params(mock=transf2) assert [('mock', transf2)] == pipeline.steps # With invalid data pipeline.set_params(steps=[('junk', ())]) assert_raises(TypeError, pipeline.fit, [[1]], [1]) assert_raises(TypeError, pipeline.fit_transform, [[1]], [1]) def test_pipeline_named_steps(): transf = Transf() mult2 = Mult(mult=2) pipeline = Pipeline([('mock', transf), ("mult", mult2)]) # Test access via named_steps bunch object assert 'mock' in pipeline.named_steps assert 'mock2' not in pipeline.named_steps assert pipeline.named_steps.mock is transf assert pipeline.named_steps.mult is mult2 # Test bunch with conflict attribute of dict pipeline = Pipeline([('values', transf), ("mult", mult2)]) assert pipeline.named_steps.values is not transf assert pipeline.named_steps.mult is mult2 @pytest.mark.parametrize('passthrough', [None, 'passthrough']) def test_pipeline_correctly_adjusts_steps(passthrough): X = np.array([[1]]) y = np.array([1]) mult2 = Mult(mult=2) mult3 = Mult(mult=3) mult5 
= Mult(mult=5) pipeline = Pipeline([ ('m2', mult2), ('bad', passthrough), ('m3', mult3), ('m5', mult5) ]) pipeline.fit(X, y) expected_names = ['m2', 'bad', 'm3', 'm5'] actual_names = [name for name, _ in pipeline.steps] assert expected_names == actual_names @pytest.mark.parametrize('passthrough', [None, 'passthrough']) def test_set_pipeline_step_passthrough(passthrough): X = np.array([[1]]) y = np.array([1]) mult2 = Mult(mult=2) mult3 = Mult(mult=3) mult5 = Mult(mult=5) def make(): return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)]) pipeline = make() exp = 2 * 3 * 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) pipeline.set_params(m3=passthrough) exp = 2 * 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) assert (pipeline.get_params(deep=True) == {'steps': pipeline.steps, 'm2': mult2, 'm3': passthrough, 'last': mult5, 'memory': None, 'm2__mult': 2, 'last__mult': 5, 'verbose': False }) pipeline.set_params(m2=passthrough) exp = 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) # for other methods, ensure no AttributeErrors on None: other_methods = ['predict_proba', 'predict_log_proba', 'decision_function', 'transform', 'score'] for method in other_methods: getattr(pipeline, method)(X) pipeline.set_params(m2=mult2) exp = 2 * 5 assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) pipeline = make() pipeline.set_params(last=passthrough) # mult2 and mult3 are active exp = 6 assert_array_equal([[exp]], pipeline.fit(X, y).transform(X)) assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) assert_raise_message(AttributeError, "'str' object has no attribute 'predict'", getattr, pipeline, 'predict') # Check 'passthrough' step at construction time exp = 2 * 5 pipeline = Pipeline( [('m2', mult2), ('m3', passthrough), ('last', mult5)]) assert_array_equal([[exp]], pipeline.fit_transform(X, y)) assert_array_equal([exp], pipeline.fit(X).predict(X)) assert_array_equal(X, pipeline.inverse_transform([[exp]])) def test_pipeline_ducktyping(): pipeline = make_pipeline(Mult(5)) pipeline.predict pipeline.transform pipeline.inverse_transform pipeline = make_pipeline(Transf()) assert not hasattr(pipeline, 'predict') pipeline.transform pipeline.inverse_transform pipeline = make_pipeline('passthrough') assert pipeline.steps[0] == ('passthrough', 'passthrough') assert not hasattr(pipeline, 'predict') pipeline.transform pipeline.inverse_transform pipeline = make_pipeline(Transf(), NoInvTransf()) assert not hasattr(pipeline, 'predict') pipeline.transform assert not hasattr(pipeline, 'inverse_transform') pipeline = make_pipeline(NoInvTransf(), Transf()) assert not hasattr(pipeline, 'predict') pipeline.transform assert not hasattr(pipeline, 'inverse_transform') def test_make_pipeline(): t1 = Transf() t2 = Transf() pipe = make_pipeline(t1, t2) assert isinstance(pipe, Pipeline) assert pipe.steps[0][0] == "transf-1" assert pipe.steps[1][0] == "transf-2" pipe = make_pipeline(t1, t2, FitParamT()) assert isinstance(pipe, Pipeline) assert pipe.steps[0][0] == "transf-1" assert 
pipe.steps[1][0] == "transf-2" assert pipe.steps[2][0] == "fitparamt" def test_feature_union_weights(): # test feature union with transformer weights X = iris.data y = iris.target pca = PCA(n_components=2, svd_solver='randomized', random_state=0) select = SelectKBest(k=1) # test using fit followed by transform fs = FeatureUnion([("pca", pca), ("select", select)], transformer_weights={"pca": 10}) fs.fit(X, y) X_transformed = fs.transform(X) # test using fit_transform fs = FeatureUnion([("pca", pca), ("select", select)], transformer_weights={"pca": 10}) X_fit_transformed = fs.fit_transform(X, y) # test it works with transformers missing fit_transform fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)], transformer_weights={"mock": 10}) X_fit_transformed_wo_method = fs.fit_transform(X, y) # check against expected result # We use a different pca object to control the random_state stream assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X)) assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel()) assert_array_almost_equal(X_fit_transformed[:, :-1], 10 * pca.fit_transform(X)) assert_array_equal(X_fit_transformed[:, -1], select.fit_transform(X, y).ravel()) assert X_fit_transformed_wo_method.shape == (X.shape[0], 7) def test_feature_union_parallel(): # test that n_jobs work for FeatureUnion X = JUNK_FOOD_DOCS fs = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ]) fs_parallel = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ], n_jobs=2) fs_parallel2 = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ], n_jobs=2) fs.fit(X) X_transformed = fs.transform(X) assert X_transformed.shape[0] == len(X) fs_parallel.fit(X) X_transformed_parallel = fs_parallel.transform(X) assert X_transformed.shape == X_transformed_parallel.shape assert_array_equal( X_transformed.toarray(), X_transformed_parallel.toarray() ) # fit_transform should behave the same X_transformed_parallel2 = fs_parallel2.fit_transform(X) assert_array_equal( X_transformed.toarray(), X_transformed_parallel2.toarray() ) # transformers should stay fit after fit_transform X_transformed_parallel2 = fs_parallel2.transform(X) assert_array_equal( X_transformed.toarray(), X_transformed_parallel2.toarray() ) def test_feature_union_feature_names(): word_vect = CountVectorizer(analyzer="word") char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3)) ft = FeatureUnion([("chars", char_vect), ("words", word_vect)]) ft.fit(JUNK_FOOD_DOCS) feature_names = ft.get_feature_names() for feat in feature_names: assert "chars__" in feat or "words__" in feat assert len(feature_names) == 35 ft = FeatureUnion([("tr1", Transf())]).fit([[1]]) assert_raise_message(AttributeError, 'Transformer tr1 (type Transf) does not provide ' 'get_feature_names', ft.get_feature_names) def test_classes_property(): X = iris.data y = iris.target reg = make_pipeline(SelectKBest(k=1), LinearRegression()) reg.fit(X, y) assert_raises(AttributeError, getattr, reg, "classes_") clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0)) assert_raises(AttributeError, getattr, clf, "classes_") clf.fit(X, y) assert_array_equal(clf.classes_, np.unique(y)) def test_set_feature_union_steps(): mult2 = Mult(2) mult2.get_feature_names = lambda: ['x2'] mult3 = Mult(3) mult3.get_feature_names = lambda: ['x3'] mult5 = Mult(5) mult5.get_feature_names = 
lambda: ['x5'] ft = FeatureUnion([('m2', mult2), ('m3', mult3)]) assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]]))) assert ['m2__x2', 'm3__x3'] == ft.get_feature_names() # Directly setting attr ft.transformer_list = [('m5', mult5)] assert_array_equal([[5]], ft.transform(np.asarray([[1]]))) assert ['m5__x5'] == ft.get_feature_names() # Using set_params ft.set_params(transformer_list=[('mock', mult3)]) assert_array_equal([[3]], ft.transform(np.asarray([[1]]))) assert ['mock__x3'] == ft.get_feature_names() # Using set_params to replace single step ft.set_params(mock=mult5) assert_array_equal([[5]], ft.transform(np.asarray([[1]]))) assert ['mock__x5'] == ft.get_feature_names() def test_set_feature_union_step_drop(): mult2 = Mult(2) mult2.get_feature_names = lambda: ['x2'] mult3 = Mult(3) mult3.get_feature_names = lambda: ['x3'] X = np.asarray([[1]]) ft = FeatureUnion([('m2', mult2), ('m3', mult3)]) assert_array_equal([[2, 3]], ft.fit(X).transform(X)) assert_array_equal([[2, 3]], ft.fit_transform(X)) assert ['m2__x2', 'm3__x3'] == ft.get_feature_names() with pytest.warns(None) as record: ft.set_params(m2='drop') assert_array_equal([[3]], ft.fit(X).transform(X)) assert_array_equal([[3]], ft.fit_transform(X)) assert ['m3__x3'] == ft.get_feature_names() assert not record with pytest.warns(None) as record: ft.set_params(m3='drop') assert_array_equal([[]], ft.fit(X).transform(X)) assert_array_equal([[]], ft.fit_transform(X)) assert [] == ft.get_feature_names() assert not record with pytest.warns(None) as record: # check we can change back ft.set_params(m3=mult3) assert_array_equal([[3]], ft.fit(X).transform(X)) assert not record with pytest.warns(None) as record: # Check 'drop' step at construction time ft = FeatureUnion([('m2', 'drop'), ('m3', mult3)]) assert_array_equal([[3]], ft.fit(X).transform(X)) assert_array_equal([[3]], ft.fit_transform(X)) assert ['m3__x3'] == ft.get_feature_names() assert not record def test_step_name_validation(): bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))] bad_steps2 = [('a', Mult(2)), ('a', Mult(3))] for cls, param in [(Pipeline, 'steps'), (FeatureUnion, 'transformer_list')]: # we validate in construction (despite scikit-learn convention) bad_steps3 = [('a', Mult(2)), (param, Mult(3))] for bad_steps, message in [ (bad_steps1, "Estimator names must not contain __: got ['a__q']"), (bad_steps2, "Names provided are not unique: ['a', 'a']"), (bad_steps3, "Estimator names conflict with constructor " "arguments: ['%s']" % param), ]: # three ways to make invalid: # - construction assert_raise_message(ValueError, message, cls, **{param: bad_steps}) # - setattr est = cls(**{param: [('a', Mult(1))]}) setattr(est, param, bad_steps) assert_raise_message(ValueError, message, est.fit, [[1]], [1]) assert_raise_message(ValueError, message, est.fit_transform, [[1]], [1]) # - set_params est = cls(**{param: [('a', Mult(1))]}) est.set_params(**{param: bad_steps}) assert_raise_message(ValueError, message, est.fit, [[1]], [1]) assert_raise_message(ValueError, message, est.fit_transform, [[1]], [1]) def test_set_params_nested_pipeline(): estimator = Pipeline([ ('a', Pipeline([ ('b', DummyRegressor()) ])) ]) estimator.set_params(a__b__alpha=0.001, a__b=Lasso()) estimator.set_params(a__steps=[('b', LogisticRegression())], a__b__C=5) def test_pipeline_wrong_memory(): # Test that an error is raised when memory is not a string or a Memory # instance X = iris.data y = iris.target # Define memory as an integer memory = 1 cached_pipe = Pipeline([('transf', DummyTransf()), ('svc', 
SVC())], memory=memory) assert_raises_regex(ValueError, "'memory' should be None, a string or" " have the same interface as joblib.Memory." " Got memory='1' instead.", cached_pipe.fit, X, y) class DummyMemory: def cache(self, func): return func class WrongDummyMemory: pass def test_pipeline_with_cache_attribute(): X = np.array([[1, 2]]) pipe = Pipeline([('transf', Transf()), ('clf', Mult())], memory=DummyMemory()) pipe.fit(X, y=None) dummy = WrongDummyMemory() pipe = Pipeline([('transf', Transf()), ('clf', Mult())], memory=dummy) assert_raises_regex(ValueError, "'memory' should be None, a string or" " have the same interface as joblib.Memory." " Got memory='{}' instead.".format(dummy), pipe.fit, X) def test_pipeline_memory(): X = iris.data y = iris.target cachedir = mkdtemp() try: if parse_version(joblib.__version__) < parse_version('0.12'): # Deal with change of API in joblib memory = joblib.Memory(cachedir=cachedir, verbose=10) else: memory = joblib.Memory(location=cachedir, verbose=10) # Test with Transformer + SVC clf = SVC(probability=True, random_state=0) transf = DummyTransf() pipe = Pipeline([('transf', clone(transf)), ('svc', clf)]) cached_pipe = Pipeline([('transf', transf), ('svc', clf)], memory=memory) # Memoize the transformer at the first fit cached_pipe.fit(X, y) pipe.fit(X, y) # Get the time stamp of the transformer in the cached pipeline ts = cached_pipe.named_steps['transf'].timestamp_ # Check that cached_pipe and pipe yield identical results assert_array_equal(pipe.predict(X), cached_pipe.predict(X)) assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X)) assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X)) assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y)) assert_array_equal(pipe.named_steps['transf'].means_, cached_pipe.named_steps['transf'].means_) assert not hasattr(transf, 'means_') # Check that we are reading the cache while fitting # a second time cached_pipe.fit(X, y) # Check that cached_pipe and pipe yield identical results assert_array_equal(pipe.predict(X), cached_pipe.predict(X)) assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X)) assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X)) assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y)) assert_array_equal(pipe.named_steps['transf'].means_, cached_pipe.named_steps['transf'].means_) assert ts == cached_pipe.named_steps['transf'].timestamp_ # Create a new pipeline with cloned estimators # Check that even changing the name step does not affect the cache hit clf_2 = SVC(probability=True, random_state=0) transf_2 = DummyTransf() cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)], memory=memory) cached_pipe_2.fit(X, y) # Check that cached_pipe and pipe yield identical results assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X)) assert_array_equal(pipe.predict_proba(X), cached_pipe_2.predict_proba(X)) assert_array_equal(pipe.predict_log_proba(X), cached_pipe_2.predict_log_proba(X)) assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y)) assert_array_equal(pipe.named_steps['transf'].means_, cached_pipe_2.named_steps['transf_2'].means_) assert ts == cached_pipe_2.named_steps['transf_2'].timestamp_ finally: shutil.rmtree(cachedir) def test_make_pipeline_memory(): cachedir = mkdtemp() if parse_version(joblib.__version__) < parse_version('0.12'): # Deal with change of API in joblib memory = joblib.Memory(cachedir=cachedir, verbose=10) else: memory = joblib.Memory(location=cachedir, 
verbose=10) pipeline = make_pipeline(DummyTransf(), SVC(), memory=memory) assert pipeline.memory is memory pipeline = make_pipeline(DummyTransf(), SVC()) assert pipeline.memory is None assert len(pipeline) == 2 shutil.rmtree(cachedir) def test_pipeline_param_error(): clf = make_pipeline(LogisticRegression()) with pytest.raises(ValueError, match="Pipeline.fit does not accept " "the sample_weight parameter"): clf.fit([[0], [0]], [0, 1], sample_weight=[1, 1]) parameter_grid_test_verbose = ((est, pattern, method) for (est, pattern), method in itertools.product( [ (Pipeline([('transf', Transf()), ('clf', FitParamT())]), r'\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n' r'\[Pipeline\].*\(step 2 of 2\) Processing clf.* total=.*\n$'), (Pipeline([('transf', Transf()), ('noop', None), ('clf', FitParamT())]), r'\[Pipeline\].*\(step 1 of 3\) Processing transf.* total=.*\n' r'\[Pipeline\].*\(step 2 of 3\) Processing noop.* total=.*\n' r'\[Pipeline\].*\(step 3 of 3\) Processing clf.* total=.*\n$'), (Pipeline([('transf', Transf()), ('noop', 'passthrough'), ('clf', FitParamT())]), r'\[Pipeline\].*\(step 1 of 3\) Processing transf.* total=.*\n' r'\[Pipeline\].*\(step 2 of 3\) Processing noop.* total=.*\n' r'\[Pipeline\].*\(step 3 of 3\) Processing clf.* total=.*\n$'), (Pipeline([('transf', Transf()), ('clf', None)]), r'\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n' r'\[Pipeline\].*\(step 2 of 2\) Processing clf.* total=.*\n$'), (Pipeline([('transf', None), ('mult', Mult())]), r'\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n' r'\[Pipeline\].*\(step 2 of 2\) Processing mult.* total=.*\n$'), (Pipeline([('transf', 'passthrough'), ('mult', Mult())]), r'\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n' r'\[Pipeline\].*\(step 2 of 2\) Processing mult.* total=.*\n$'), (FeatureUnion([('mult1', Mult()), ('mult2', Mult())]), r'\[FeatureUnion\].*\(step 1 of 2\) Processing mult1.* total=.*\n' r'\[FeatureUnion\].*\(step 2 of 2\) Processing mult2.* total=.*\n$'), (FeatureUnion([('mult1', 'drop'), ('mult2', Mult()), ('mult3', 'drop')]), r'\[FeatureUnion\].*\(step 1 of 1\) Processing mult2.* total=.*\n$') ], ['fit', 'fit_transform', 'fit_predict']) if hasattr(est, method) and not ( method == 'fit_transform' and hasattr(est, 'steps') and isinstance(est.steps[-1][1], FitParamT)) ) @pytest.mark.parametrize('est, pattern, method', parameter_grid_test_verbose) def test_verbose(est, method, pattern, capsys): func = getattr(est, method) X = [[1, 2, 3], [4, 5, 6]] y = [[7], [8]] est.set_params(verbose=False) func(X, y) assert not capsys.readouterr().out, 'Got output for verbose=False' est.set_params(verbose=True) func(X, y) assert re.match(pattern, capsys.readouterr().out) def test_n_features_in_pipeline(): # make sure pipelines delegate n_features_in to the first step X = [[1, 2], [3, 4], [5, 6]] y = [0, 1, 2] ss = StandardScaler() gbdt = HistGradientBoostingClassifier() pipe = make_pipeline(ss, gbdt) assert not hasattr(pipe, 'n_features_in_') pipe.fit(X, y) assert pipe.n_features_in_ == ss.n_features_in_ == 2 # if the first step has the n_features_in attribute then the pipeline also # has it, even though it isn't fitted. 
ss = StandardScaler() gbdt = HistGradientBoostingClassifier() pipe = make_pipeline(ss, gbdt) ss.fit(X, y) assert pipe.n_features_in_ == ss.n_features_in_ == 2 assert not hasattr(gbdt, 'n_features_in_') def test_n_features_in_feature_union(): # make sure FeatureUnion delegates n_features_in to the first transformer X = [[1, 2], [3, 4], [5, 6]] y = [0, 1, 2] ss = StandardScaler() fu = make_union(ss) assert not hasattr(fu, 'n_features_in_') fu.fit(X, y) assert fu.n_features_in_ == ss.n_features_in_ == 2 # if the first step has the n_features_in attribute then the feature_union # also has it, even though it isn't fitted. ss = StandardScaler() fu = make_union(ss) ss.fit(X, y) assert fu.n_features_in_ == ss.n_features_in_ == 2 def test_feature_union_fit_params(): # Regression test for issue: #15117 class Dummy(TransformerMixin, BaseEstimator): def fit(self, X, y=None, **fit_params): if fit_params != {'a': 0}: raise ValueError return self def transform(self, X, y=None): return X X, y = iris.data, iris.target t = FeatureUnion([('dummy0', Dummy()), ('dummy1', Dummy())]) with pytest.raises(ValueError): t.fit(X, y) with pytest.raises(ValueError): t.fit_transform(X, y) t.fit(X, y, a=0) t.fit_transform(X, y, a=0) def test_pipeline_missing_values_leniency(): # check that pipeline let the missing values validation to # the underlying transformers and predictors. X, y = iris.data, iris.target mask = np.random.choice([1, 0], X.shape, p=[.1, .9]).astype(bool) X[mask] = np.nan pipe = make_pipeline(SimpleImputer(), LogisticRegression()) assert pipe.fit(X, y).score(X, y) > 0.4 def test_feature_union_warns_unknown_transformer_weight(): # Warn user when transformer_weights containers a key not present in # transformer_list X = [[1, 2], [3, 4], [5, 6]] y = [0, 1, 2] transformer_list = [('transf', Transf())] # Transformer weights dictionary with incorrect name weights = {'transformer': 1} expected_msg = ('Attempting to weight transformer "transformer", ' 'but it is not present in transformer_list.') union = FeatureUnion(transformer_list, transformer_weights=weights) with pytest.raises(ValueError, match=expected_msg): union.fit(X, y) @pytest.mark.parametrize('passthrough', [None, 'passthrough']) def test_pipeline_get_tags_none(passthrough): # Checks that tags are set correctly when the first transformer is None or # 'passthrough' # Non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/18815 pipe = make_pipeline(passthrough, SVC()) assert not pipe._get_tags()['pairwise'] # FIXME: Replace this test with a full `check_estimator` once we have API only # checks. @pytest.mark.parametrize("Predictor", [MinimalRegressor, MinimalClassifier]) def test_search_cv_using_minimal_compatible_estimator(Predictor): # Check that third-party library estimators can be part of a pipeline # and tuned by grid-search without inheriting from BaseEstimator. rng = np.random.RandomState(0) X, y = rng.randn(25, 2), np.array([0] * 5 + [1] * 20) model = Pipeline([ ("transformer", MinimalTransformer()), ("predictor", Predictor()) ]) model.fit(X, y) y_pred = model.predict(X) if is_classifier(model): assert_array_equal(y_pred, 1) assert model.score(X, y) == pytest.approx(accuracy_score(y, y_pred)) else: assert_allclose(y_pred, y.mean()) assert model.score(X, y) == pytest.approx(r2_score(y, y_pred))
bsd-3-clause
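Editor's note (not part of the original record): the pipeline tests above revolve around composing FeatureUnion transformers and caching fitted steps through joblib.Memory. The sketch below illustrates that same public scikit-learn API on toy data; names and parameter values are illustrative only and assume a recent scikit-learn/joblib release.

# Illustrative sketch only -- assumes current scikit-learn and joblib APIs.
from tempfile import mkdtemp
from shutil import rmtree

from joblib import Memory
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import FeatureUnion, Pipeline

X, y = load_iris(return_X_y=True)
cachedir = mkdtemp()

# Transformer fits are memoized on disk, so refitting with unchanged
# parameters reuses the cached results instead of recomputing them.
union = FeatureUnion([("pca", PCA(n_components=2)), ("kbest", SelectKBest(k=1))])
pipe = Pipeline([("features", union), ("clf", LogisticRegression(max_iter=1000))],
                memory=Memory(location=cachedir, verbose=0))
pipe.fit(X, y)
print(pipe.score(X, y))
rmtree(cachedir)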
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/sklearn/ensemble/tests/test_bagging.py
7
29340
""" Testing for the bagging ensemble module (sklearn.ensemble.bagging). """ # Author: Gilles Louppe # License: BSD 3 clause import numpy as np from sklearn.base import BaseEstimator from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_raise_message from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.model_selection import GridSearchCV, ParameterGrid from sklearn.ensemble import BaggingClassifier, BaggingRegressor from sklearn.linear_model import Perceptron, LogisticRegression from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.svm import SVC, SVR from sklearn.pipeline import make_pipeline from sklearn.feature_selection import SelectKBest from sklearn.model_selection import train_test_split from sklearn.datasets import load_boston, load_iris, make_hastie_10_2 from sklearn.utils import check_random_state from scipy.sparse import csc_matrix, csr_matrix rng = check_random_state(0) # also load the iris dataset # and randomly permute it iris = load_iris() perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] def test_classification(): # Check classification for various parameter settings. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "max_features": [1, 2, 4], "bootstrap": [True, False], "bootstrap_features": [True, False]}) for base_estimator in [None, DummyClassifier(), Perceptron(tol=1e-3), DecisionTreeClassifier(), KNeighborsClassifier(), SVC()]: for params in grid: BaggingClassifier(base_estimator=base_estimator, random_state=rng, **params).fit(X_train, y_train).predict(X_test) def test_sparse_classification(): # Check classification for various parameter settings on sparse input. 
class CustomSVC(SVC): """SVC variant that records the nature of the training set""" def fit(self, X, y): super(CustomSVC, self).fit(X, y) self.data_type_ = type(X) return self rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) parameter_sets = [ {"max_samples": 0.5, "max_features": 2, "bootstrap": True, "bootstrap_features": True}, {"max_samples": 1.0, "max_features": 4, "bootstrap": True, "bootstrap_features": True}, {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, ] for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in parameter_sets: for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']: # Trained on sparse format sparse_classifier = BaggingClassifier( base_estimator=CustomSVC(decision_function_shape='ovr'), random_state=1, **params ).fit(X_train_sparse, y_train) sparse_results = getattr(sparse_classifier, f)(X_test_sparse) # Trained on dense format dense_classifier = BaggingClassifier( base_estimator=CustomSVC(decision_function_shape='ovr'), random_state=1, **params ).fit(X_train, y_train) dense_results = getattr(dense_classifier, f)(X_test) assert_array_equal(sparse_results, dense_results) sparse_type = type(X_train_sparse) types = [i.data_type_ for i in sparse_classifier.estimators_] assert all([t == sparse_type for t in types]) def test_regression(): # Check regression for various parameter settings. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data[:50], boston.target[:50], random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "max_features": [0.5, 1.0], "bootstrap": [True, False], "bootstrap_features": [True, False]}) for base_estimator in [None, DummyRegressor(), DecisionTreeRegressor(), KNeighborsRegressor(), SVR()]: for params in grid: BaggingRegressor(base_estimator=base_estimator, random_state=rng, **params).fit(X_train, y_train).predict(X_test) def test_sparse_regression(): # Check regression for various parameter settings on sparse input. 
rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data[:50], boston.target[:50], random_state=rng) class CustomSVR(SVR): """SVC variant that records the nature of the training set""" def fit(self, X, y): super(CustomSVR, self).fit(X, y) self.data_type_ = type(X) return self parameter_sets = [ {"max_samples": 0.5, "max_features": 2, "bootstrap": True, "bootstrap_features": True}, {"max_samples": 1.0, "max_features": 4, "bootstrap": True, "bootstrap_features": True}, {"max_features": 2, "bootstrap": False, "bootstrap_features": True}, {"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False}, ] for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in parameter_sets: # Trained on sparse format sparse_classifier = BaggingRegressor( base_estimator=CustomSVR(), random_state=1, **params ).fit(X_train_sparse, y_train) sparse_results = sparse_classifier.predict(X_test_sparse) # Trained on dense format dense_results = BaggingRegressor( base_estimator=CustomSVR(), random_state=1, **params ).fit(X_train, y_train).predict(X_test) sparse_type = type(X_train_sparse) types = [i.data_type_ for i in sparse_classifier.estimators_] assert_array_equal(sparse_results, dense_results) assert all([t == sparse_type for t in types]) assert_array_equal(sparse_results, dense_results) def test_bootstrap_samples(): # Test that bootstrapping samples generate non-perfect base estimators. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) base_estimator = DecisionTreeRegressor().fit(X_train, y_train) # without bootstrap, all trees are perfect on the training set ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_samples=1.0, bootstrap=False, random_state=rng).fit(X_train, y_train) assert_equal(base_estimator.score(X_train, y_train), ensemble.score(X_train, y_train)) # with bootstrap, trees are no longer perfect on the training set ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_samples=1.0, bootstrap=True, random_state=rng).fit(X_train, y_train) assert_greater(base_estimator.score(X_train, y_train), ensemble.score(X_train, y_train)) def test_bootstrap_features(): # Test that bootstrapping features may generate duplicate features. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_features=1.0, bootstrap_features=False, random_state=rng).fit(X_train, y_train) for features in ensemble.estimators_features_: assert_equal(boston.data.shape[1], np.unique(features).shape[0]) ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(), max_features=1.0, bootstrap_features=True, random_state=rng).fit(X_train, y_train) for features in ensemble.estimators_features_: assert_greater(boston.data.shape[1], np.unique(features).shape[0]) def test_probability(): # Predict probabilities. 
rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) with np.errstate(divide="ignore", invalid="ignore"): # Normal case ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(), random_state=rng).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) # Degenerate case, where some classes are missing ensemble = BaggingClassifier(base_estimator=LogisticRegression(), random_state=rng, max_samples=5).fit(X_train, y_train) assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))) assert_array_almost_equal(ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))) def test_oob_score_classification(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) for base_estimator in [DecisionTreeClassifier(), SVC()]: clf = BaggingClassifier(base_estimator=base_estimator, n_estimators=100, bootstrap=True, oob_score=True, random_state=rng).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert_less(abs(test_score - clf.oob_score_), 0.1) # Test with few estimators assert_warns(UserWarning, BaggingClassifier(base_estimator=base_estimator, n_estimators=1, bootstrap=True, oob_score=True, random_state=rng).fit, X_train, y_train) def test_oob_score_regression(): # Check that oob prediction is a good estimation of the generalization # error. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(), n_estimators=50, bootstrap=True, oob_score=True, random_state=rng).fit(X_train, y_train) test_score = clf.score(X_test, y_test) assert_less(abs(test_score - clf.oob_score_), 0.1) # Test with few estimators assert_warns(UserWarning, BaggingRegressor(base_estimator=DecisionTreeRegressor(), n_estimators=1, bootstrap=True, oob_score=True, random_state=rng).fit, X_train, y_train) def test_single_estimator(): # Check singleton ensembles. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(), n_estimators=1, bootstrap=False, bootstrap_features=False, random_state=rng).fit(X_train, y_train) clf2 = KNeighborsRegressor().fit(X_train, y_train) assert_array_equal(clf1.predict(X_test), clf2.predict(X_test)) def test_error(): # Test that it gives proper exception on deficient input. 
X, y = iris.data, iris.target base = DecisionTreeClassifier() # Test max_samples assert_raises(ValueError, BaggingClassifier(base, max_samples=-1).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=0.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=2.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples=1000).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_samples="foobar").fit, X, y) # Test max_features assert_raises(ValueError, BaggingClassifier(base, max_features=-1).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=0.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=2.0).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features=5).fit, X, y) assert_raises(ValueError, BaggingClassifier(base, max_features="foobar").fit, X, y) # Test support of decision_function assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function')) def test_parallel_classification(): # Check parallel classification. rng = check_random_state(0) # Classification X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) # predict_proba ensemble.set_params(n_jobs=1) y1 = ensemble.predict_proba(X_test) ensemble.set_params(n_jobs=2) y2 = ensemble.predict_proba(X_test) assert_array_almost_equal(y1, y2) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=1, random_state=0).fit(X_train, y_train) y3 = ensemble.predict_proba(X_test) assert_array_almost_equal(y1, y3) # decision_function ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'), n_jobs=3, random_state=0).fit(X_train, y_train) ensemble.set_params(n_jobs=1) decisions1 = ensemble.decision_function(X_test) ensemble.set_params(n_jobs=2) decisions2 = ensemble.decision_function(X_test) assert_array_almost_equal(decisions1, decisions2) X_err = np.hstack((X_test, np.zeros((X_test.shape[0], 1)))) assert_raise_message(ValueError, "Number of features of the model " "must match the input. Model n_features is {0} " "and input n_features is {1} " "".format(X_test.shape[1], X_err.shape[1]), ensemble.decision_function, X_err) ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'), n_jobs=1, random_state=0).fit(X_train, y_train) decisions3 = ensemble.decision_function(X_test) assert_array_almost_equal(decisions1, decisions3) def test_parallel_regression(): # Check parallel regression. rng = check_random_state(0) X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(X_train, y_train) ensemble.set_params(n_jobs=1) y1 = ensemble.predict(X_test) ensemble.set_params(n_jobs=2) y2 = ensemble.predict(X_test) assert_array_almost_equal(y1, y2) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=1, random_state=0).fit(X_train, y_train) y3 = ensemble.predict(X_test) assert_array_almost_equal(y1, y3) def test_gridsearch(): # Check that bagging ensembles can be grid-searched. 
# Transform iris into a binary classification task X, y = iris.data, iris.target y[y == 2] = 1 # Grid search with scoring based on decision_function parameters = {'n_estimators': (1, 2), 'base_estimator__C': (1, 2)} GridSearchCV(BaggingClassifier(SVC()), parameters, scoring="roc_auc").fit(X, y) def test_base_estimator(): # Check base_estimator and its default values. rng = check_random_state(0) # Classification X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=rng) ensemble = BaggingClassifier(None, n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) ensemble = BaggingClassifier(DecisionTreeClassifier(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier)) ensemble = BaggingClassifier(Perceptron(tol=1e-3), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, Perceptron)) # Regression X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=rng) ensemble = BaggingRegressor(None, n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor)) ensemble = BaggingRegressor(SVR(), n_jobs=3, random_state=0).fit(X_train, y_train) assert_true(isinstance(ensemble.base_estimator_, SVR)) def test_bagging_with_pipeline(): estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()), max_features=2) estimator.fit(iris.data, iris.target) assert_true(isinstance(estimator[0].steps[-1][1].random_state, int)) class DummyZeroEstimator(BaseEstimator): def fit(self, X, y): self.classes_ = np.unique(y) return self def predict(self, X): return self.classes_[np.zeros(X.shape[0], dtype=int)] def test_bagging_sample_weight_unsupported_but_passed(): estimator = BaggingClassifier(DummyZeroEstimator()) rng = check_random_state(0) estimator.fit(iris.data, iris.target).predict(iris.data) assert_raises(ValueError, estimator.fit, iris.data, iris.target, sample_weight=rng.randint(10, size=(iris.data.shape[0]))) def test_warm_start(random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. X, y = make_hastie_10_2(n_samples=20, random_state=1) clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = BaggingClassifier(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) def test_warm_start_smaller_n_estimators(): # Test if warm start'ed second fit with smaller n_estimators raises error. 
X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BaggingClassifier(n_estimators=5, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_equal_n_estimators(): # Test that nothing happens when fitting without increasing n_estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83) clf.fit(X_train, y_train) y_pred = clf.predict(X_test) # modify X to nonsense values, this should not change anything X_train += 1. assert_warns_message(UserWarning, "Warm-start fitting without increasing n_estimators does not", clf.fit, X_train, y_train) assert_array_equal(y_pred, clf.predict(X_test)) def test_warm_start_equivalence(): # warm started classifier with 5+5 estimators should be equivalent to # one classifier with 10 estimators X, y = make_hastie_10_2(n_samples=20, random_state=1) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43) clf_ws = BaggingClassifier(n_estimators=5, warm_start=True, random_state=3141) clf_ws.fit(X_train, y_train) clf_ws.set_params(n_estimators=10) clf_ws.fit(X_train, y_train) y1 = clf_ws.predict(X_test) clf = BaggingClassifier(n_estimators=10, warm_start=False, random_state=3141) clf.fit(X_train, y_train) y2 = clf.predict(X_test) assert_array_almost_equal(y1, y2) def test_warm_start_with_oob_score_fails(): # Check using oob_score and warm_start simultaneously fails X, y = make_hastie_10_2(n_samples=20, random_state=1) clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True) assert_raises(ValueError, clf.fit, X, y) def test_oob_score_removed_on_warm_start(): X, y = make_hastie_10_2(n_samples=2000, random_state=1) clf = BaggingClassifier(n_estimators=50, oob_score=True) clf.fit(X, y) clf.set_params(warm_start=True, oob_score=False, n_estimators=100) clf.fit(X, y) assert_raises(AttributeError, getattr, clf, "oob_score_") def test_oob_score_consistency(): # Make sure OOB scores are identical when random_state, estimator, and # training data are fixed and fitting is done twice X, y = make_hastie_10_2(n_samples=200, random_state=1) bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=0.5, max_features=0.5, oob_score=True, random_state=1) assert_equal(bagging.fit(X, y).oob_score_, bagging.fit(X, y).oob_score_) def test_estimators_samples(): # Check that format of estimators_samples_ is correct and that results # generated at fit time can be identically reproduced at a later time # using data saved in object attributes. 
X, y = make_hastie_10_2(n_samples=200, random_state=1) bagging = BaggingClassifier(LogisticRegression(), max_samples=0.5, max_features=0.5, random_state=1, bootstrap=False) bagging.fit(X, y) # Get relevant attributes estimators_samples = bagging.estimators_samples_ estimators_features = bagging.estimators_features_ estimators = bagging.estimators_ # Test for correct formatting assert_equal(len(estimators_samples), len(estimators)) assert_equal(len(estimators_samples[0]), len(X)) assert_equal(estimators_samples[0].dtype.kind, 'b') # Re-fit single estimator to test for consistent sampling estimator_index = 0 estimator_samples = estimators_samples[estimator_index] estimator_features = estimators_features[estimator_index] estimator = estimators[estimator_index] X_train = (X[estimator_samples])[:, estimator_features] y_train = y[estimator_samples] orig_coefs = estimator.coef_ estimator.fit(X_train, y_train) new_coefs = estimator.coef_ assert_array_almost_equal(orig_coefs, new_coefs) def test_max_samples_consistency(): # Make sure validated max_samples and original max_samples are identical # when valid integer max_samples supplied by user max_samples = 100 X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1) bagging = BaggingClassifier(KNeighborsClassifier(), max_samples=max_samples, max_features=0.5, random_state=1) bagging.fit(X, y) assert_equal(bagging._max_samples, max_samples) def test_set_oob_score_label_encoding(): # Make sure the oob_score doesn't change when the labels change # See: https://github.com/scikit-learn/scikit-learn/issues/8933 random_state = 5 X = [[-1], [0], [1]] * 5 Y1 = ['A', 'B', 'C'] * 5 Y2 = [-1, 0, 1] * 5 Y3 = [0, 1, 2] * 5 x1 = BaggingClassifier(oob_score=True, random_state=random_state).fit(X, Y1).oob_score_ x2 = BaggingClassifier(oob_score=True, random_state=random_state).fit(X, Y2).oob_score_ x3 = BaggingClassifier(oob_score=True, random_state=random_state).fit(X, Y3).oob_score_ assert_equal([x1, x2], [x3, x3])
mit
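Editor's note (not part of the original record): as a compact counterpart to the bagging tests above, the sketch below shows a plain BaggingClassifier fit/score round trip with an out-of-bag estimate. It deliberately uses the base_estimator keyword from the scikit-learn version these tests target (newer releases rename it to estimator); all values are illustrative.

# Illustrative sketch only.
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X_train, X_test, y_train, y_test = train_test_split(
    *load_iris(return_X_y=True), random_state=0)
clf = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                        n_estimators=25, max_samples=0.8,
                        bootstrap=True, oob_score=True, random_state=0)
clf.fit(X_train, y_train)
print(clf.oob_score_, clf.score(X_test, y_test))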
JelleAalbers/blueice
tests/test_inference.py
1
4362
# import matplotlib # matplotlib.use('agg') from blueice.test_helpers import * from blueice.inference import * from blueice.likelihood import UnbinnedLogLikelihood as LogLikelihood def test_fit_minuit(): # Single rate parameter lf = LogLikelihood(test_conf()) lf.add_rate_parameter('s0') lf.set_data(lf.base_model.simulate()) fit_result, ll = bestfit_minuit(lf) assert isinstance(fit_result, dict) assert 's0_rate_multiplier' in fit_result # Don't fit res, ll = bestfit_minuit(lf, s0_rate_multiplier=1) assert len(res) == 0 assert ll == lf(s0_rate_multiplier=1) # Single shape parameter lf = LogLikelihood(test_conf()) lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2)) lf.prepare() lf.set_data(lf.base_model.simulate()) fit_result, ll = bestfit_minuit(lf) assert 'some_multiplier' in fit_result # Shape and rate parameter lf = LogLikelihood(test_conf()) lf.add_rate_parameter('s0') lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2)) lf.prepare() lf.set_data(lf.base_model.simulate()) fit_result, ll = bestfit_minuit(lf) assert 'some_multiplier' in fit_result assert 's0_rate_multiplier' in fit_result # Non-numeric shape parameter lf = LogLikelihood(test_conf()) lf.add_shape_parameter('strlen_multiplier', {1: 'x', 2: 'hi', 3:'wha'}, base_value=1) lf.prepare() lf.set_data(lf.base_model.simulate()) fit_result, ll = bestfit_minuit(lf) assert 'strlen_multiplier' in fit_result def test_fit_scipy(): # Single rate parameter lf = LogLikelihood(test_conf()) lf.add_rate_parameter('s0') lf.set_data(lf.base_model.simulate()) fit_result, ll = bestfit_scipy(lf) assert isinstance(fit_result, dict) assert 's0_rate_multiplier' in fit_result # Don't fit res, ll = bestfit_scipy(lf, s0_rate_multiplier=1) assert len(res) == 0 assert ll == lf(s0_rate_multiplier=1) # Single shape parameter lf = LogLikelihood(test_conf()) lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2)) lf.prepare() lf.set_data(lf.base_model.simulate()) fit_result, ll = bestfit_scipy(lf) assert 'some_multiplier' in fit_result # Shape and rate parameter lf = LogLikelihood(test_conf()) lf.add_rate_parameter('s0') lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2)) lf.prepare() lf.set_data(lf.base_model.simulate()) fit_result, ll = bestfit_scipy(lf) assert 'some_multiplier' in fit_result assert 's0_rate_multiplier' in fit_result # Non-numeric shape parameter lf = LogLikelihood(test_conf()) lf.add_shape_parameter('strlen_multiplier', {1: 'x', 2: 'hi', 3:'wha'}, base_value=1) lf.prepare() lf.set_data(lf.base_model.simulate()) fit_result, ll = bestfit_scipy(lf) assert 'strlen_multiplier' in fit_result # def test_plot(): # """Tests the plot_likelihood_space code. # For now just test that it doesn't crash -- image comparison tests are tricky... # """ # import matplotlib.pyplot as plt # lf = LogLikelihood(test_conf()) # lf.add_rate_parameter('s0') # lf.add_shape_parameter('some_multiplier', (0.5, 1, 1.5, 2)) # lf.prepare() # lf.set_data(lf.base_model.simulate()) # # plot_likelihood_ratio(lf, ('s0_rate_multiplier', np.linspace(0.5, 2, 3))) # plt.close() # plot_likelihood_ratio(lf, # ('s0_rate_multiplier', np.linspace(0.5, 2, 3)), # ('some_multiplier', np.linspace(0.5, 2, 3))) # plt.close() def test_limit(): """Test the limit setting code For now just tests if it runs, does not test whether the results are correct... 
""" lf = LogLikelihood(test_conf(n_sources=2)) lf.add_rate_parameter('s0') lf.prepare() lf.set_data(lf.base_model.simulate()) # Test upper limits one_parameter_interval(lf, target='s0_rate_multiplier', kind='upper', bound=40) one_parameter_interval(lf, target='s0_rate_multiplier', kind='lower', bound=0.1) one_parameter_interval(lf, target='s0_rate_multiplier', kind='central', bound=(0.1, 20)) # Bit tricky to test multiple params, in these simple examples they can compensate completely for each other # so all values in a subspace seem equally likely once two of them are floating.
bsd-3-clause
tawsifkhan/scikit-learn
examples/decomposition/plot_pca_vs_fa_model_selection.py
142
4467
""" =============================================================== Model selection with Probabilistic PCA and Factor Analysis (FA) =============================================================== Probabilistic PCA and Factor Analysis are probabilistic models. The consequence is that the likelihood of new data can be used for model selection and covariance estimation. Here we compare PCA and FA with cross-validation on low rank data corrupted with homoscedastic noise (noise variance is the same for each feature) or heteroscedastic noise (noise variance is the different for each feature). In a second step we compare the model likelihood to the likelihoods obtained from shrinkage covariance estimators. One can observe that with homoscedastic noise both FA and PCA succeed in recovering the size of the low rank subspace. The likelihood with PCA is higher than FA in this case. However PCA fails and overestimates the rank when heteroscedastic noise is present. Under appropriate circumstances the low rank models are more likely than shrinkage models. The automatic estimation from Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604 by Thomas P. Minka is also compared. """ print(__doc__) # Authors: Alexandre Gramfort # Denis A. Engemann # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from scipy import linalg from sklearn.decomposition import PCA, FactorAnalysis from sklearn.covariance import ShrunkCovariance, LedoitWolf from sklearn.cross_validation import cross_val_score from sklearn.grid_search import GridSearchCV ############################################################################### # Create the data n_samples, n_features, rank = 1000, 50, 10 sigma = 1. rng = np.random.RandomState(42) U, _, _ = linalg.svd(rng.randn(n_features, n_features)) X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T) # Adding homoscedastic noise X_homo = X + sigma * rng.randn(n_samples, n_features) # Adding heteroscedastic noise sigmas = sigma * rng.rand(n_features) + sigma / 2. 
X_hetero = X + rng.randn(n_samples, n_features) * sigmas

###############################################################################
# Fit the models

n_components = np.arange(0, n_features, 5)  # options for n_components


def compute_scores(X):
    pca = PCA()
    fa = FactorAnalysis()

    pca_scores, fa_scores = [], []
    for n in n_components:
        pca.n_components = n
        fa.n_components = n
        pca_scores.append(np.mean(cross_val_score(pca, X)))
        fa_scores.append(np.mean(cross_val_score(fa, X)))

    return pca_scores, fa_scores


def shrunk_cov_score(X):
    shrinkages = np.logspace(-2, 0, 30)
    cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
    return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))


def lw_score(X):
    return np.mean(cross_val_score(LedoitWolf(), X))


for X, title in [(X_homo, 'Homoscedastic Noise'),
                 (X_hetero, 'Heteroscedastic Noise')]:
    pca_scores, fa_scores = compute_scores(X)
    n_components_pca = n_components[np.argmax(pca_scores)]
    n_components_fa = n_components[np.argmax(fa_scores)]

    pca = PCA(n_components='mle')
    pca.fit(X)
    n_components_pca_mle = pca.n_components_

    print("best n_components by PCA CV = %d" % n_components_pca)
    print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
    print("best n_components by PCA MLE = %d" % n_components_pca_mle)

    plt.figure()
    plt.plot(n_components, pca_scores, 'b', label='PCA scores')
    plt.plot(n_components, fa_scores, 'r', label='FA scores')
    plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
    plt.axvline(n_components_pca, color='b',
                label='PCA CV: %d' % n_components_pca, linestyle='--')
    plt.axvline(n_components_fa, color='r',
                label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
    plt.axvline(n_components_pca_mle, color='k',
                label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')

    # compare with other covariance estimators
    plt.axhline(shrunk_cov_score(X), color='violet',
                label='Shrunk Covariance MLE', linestyle='-.')
    plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')

    plt.xlabel('nb of components')
    plt.ylabel('CV scores')
    plt.legend(loc='lower right')
    plt.title(title)

plt.show()
bsd-3-clause
TheProgrammingDuck/Europa-Challenge
Experimental/Standardiser.py
1
4518
''' Author: Flinn Dolman @License: MIT, See License.txt at root of project. Method calls to this object serve the purpose of preparing data for the model and saving/loading the scale used to train the model. The same scale must be used to standardise data used for predicitions which is why this is important. There also exists a method for performing PCA analysis on data. ''' import pandas as pd import numpy as np import random from sklearn.model_selection import train_test_split #splitting up data for testing from sklearn import preprocessing #standardisation of data from sklearn.externals import joblib #saving/loading the scale used for standardising the data during training from sklearn.decomposition import PCA #principal component analysis import random class Standardiser: def __init__(self): pass #load in data, then standardise and split it for training/testing. def initialise(self): print("standardiser initialising") self.data = self.loadData() self.X_train, self.X_test, self.y_train, self.y_test = self.splitData(self.data) self.std_scale, self.X_train_std, self.X_test_std = self.standardise(self.X_train, self.X_test, self.y_train, self.y_test) #getter functions for retreiving split standardised data def get_std_X_train(self): return self.X_train_std def get_std_X_test(self): return self.X_test_std def get_y_train(self): return self.y_train def get_y_test(self): return self.y_test #read in the data as a pandas dataframe. ignore the first row (headers) and only use columns PCA deemed good for training on. def loadData(self): df = pd.io.parsers.read_csv( 'Data/NewBalanced.csv', header=None, skiprows = [0], usecols=[5,10,15,17,18,19,20,22]) return df #loads in Darkskies weather predictions for the coming week. def loadForecast(self,forecast_loc): #load in forecast csv self.foredf = pd.io.parsers.read_csv( forecast_loc, header=None, skiprows = [0], usecols=[1,2,3,4,5,6,7,8,9]) X_forecast = self.foredf.values[:,3:] X_forecast = self.standardise_Pred(X_forecast) return X_forecast #split data into training and testing samples def splitData(self, data): X = data.values[:,:7] y = data.values[:,7] #split the data into training and testing data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=random.randint(10,100000)) return X_train, X_test, y_train, y_test #standardise the data and save the scale it was standardised on. def standardise(self, X_train, X_test, y_train, y_test): #standardisation using sklearn self.std_scale = preprocessing.StandardScaler().fit(X_train) X_train_std = self.std_scale.transform(X_train) X_test_std = self.std_scale.transform(X_test) self.saveScale() return self.std_scale, X_train_std, X_test_std #standardises data readin for prediction def standardise_Pred(self, X_forecast): X_forecast_std = self.std_scale.transform(X_forecast) return X_forecast_std #performs PCA on the traning data. 
def PCAan(self, X_train_std, X_test_std, y_train): pca_std = PCA(n_components=2).fit(X_train_std) X_train_std = pca_std.transform(X_train_std) X_test_std = pca_std.transform(X_test_std) return pca_std, X_train_std, X_test_std #appends forecast predictions (both class and probability based) to the svm input csv and then produces a new svmoutput csv def make_CSV(self, fore_pred, fore_prob,outputfile): print("make_CSV") forearray = self.foredf.values.tolist() i = 0 for element in forearray: element.append(fore_pred[i]) element.append(fore_prob[i][1]) i +=1 df = pd.DataFrame(forearray) df.to_csv(outputfile) #save the scale standardised on def saveScale(self): print("saveScale") joblib.dump(self.std_scale, 'Models/Scaler.pkl') #load the scale standardised on def loadScale(self): print("loadScale") self.std_scale = joblib.load('Scaler.pkl')
mit
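Editor's note (not part of the original record): the key point of the Standardiser class above is that the scaler fitted on the training data must be persisted and reused verbatim when standardising data for predictions. A minimal sketch of that idea, using plain joblib rather than the older sklearn.externals import; array shapes are illustrative.

# Illustrative sketch only.
import joblib
import numpy as np
from sklearn.preprocessing import StandardScaler

X_train = np.random.rand(100, 7)
scaler = StandardScaler().fit(X_train)          # learn the training-set scale
joblib.dump(scaler, 'Scaler.pkl')               # persist it

X_forecast = np.random.rand(5, 7)
scaler = joblib.load('Scaler.pkl')              # reload the identical scale
X_forecast_std = scaler.transform(X_forecast)   # standardise new data with it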
ereodeereigeo/dataTritiumWS22
ext_datos.py
1
2242
# -*- coding: utf8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import obtener as ob

#A list of headers is created, used to extract the columns
listaEnc = ['time', 'errors', 'limiters', 'currentSP', \
'velocitySP', 'idcSP', 'miscSP', 'busVoltage','busCurrent',\
'velocityMs', 'motorRpm', 'pcCurrent', 'pbCurrent', 'odometer',\
'busCharge', 'bemf','voutD', 'voutQ', 'ioutD', 'ioutQ', '15v',\
'1.9v', '3.3v', 'motorTemp', 'dspTemp', 'phaseaTemp', 'phasebTemp',\
'phasecTemp', 'cantranserr', 'canrecerr', 'slipSpeed']

#label is defined as a hidden option to narrow down the extracted data
def extraer_data(archivo, fecha=None, label=listaEnc, time=0.2, values=True):
    #build a list with the file names from the given directory
    lista = ob.obtener_archivos(archivo, fecha)
    #read the csv files, building a list of tables
    tablas = []
    for nombres in lista:
        #read the data from the csv file
        datos = pd.read_csv(nombres, names=label, header=0, index_col = 0, parse_dates = True)
        count = 1
        for i in range(0,5):
            if datos.index[i]==datos.index[i+1]:
                count+=1
            else:
                break
        if count==5:
            fecha_indice_inf = nombres[6:16]+' '+nombres[17:19]+':'+nombres[20:22]+':'+nombres[23:25]
        elif count == 4:
            fecha_indice_inf = nombres[6:16]+' '+nombres[17:19]+':'+nombres[20:22]+':'+nombres[23:25]+'.2'
        elif count == 3:
            fecha_indice_inf = nombres[6:16]+' '+nombres[17:19]+':'+nombres[20:22]+':'+nombres[23:25]+'.4'
        elif count == 2:
            fecha_indice_inf = nombres[6:16]+' '+nombres[17:19]+':'+nombres[20:22]+':'+nombres[23:25]+'.6'
        elif count == 1:
            fecha_indice_inf = nombres[6:16]+' '+nombres[17:19]+':'+nombres[20:22]+':'+nombres[23:25]+'.8'
        else:
            print "error"
        periodo = len(datos.index)
        tiempo_nuevo=pd.date_range(fecha_indice_inf,periods=periodo, freq='200ms', name = 'time')
        datos.index = tiempo_nuevo
        datos = datos.convert_objects(convert_numeric=True)
        datos = datos.drop(datos[['miscSP','velocityMs','odometer','busCharge','15v',\
        '1.9v','3.3v','phaseaTemp','phasebTemp','cantranserr', 'canrecerr', 'slipSpeed']],1)
        if fecha == None:
            tablas.append(datos)
        elif str(fecha) == str(nombres[6:16]):
            tablas.append(datos)
    return tablas
gpl-2.0
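Editor's note (not part of the original record): the timestamp handling in extraer_data above amounts to replacing a coarse, duplicated index with a regular 200 ms DatetimeIndex anchored at an inferred start time. A minimal pandas sketch of that step; the start timestamp and column are hypothetical.

# Illustrative sketch only.
import numpy as np
import pandas as pd

datos = pd.DataFrame({'busVoltage': np.random.rand(10)})
inicio = '2014-05-03 12:00:01.4'   # hypothetical inferred start time
datos.index = pd.date_range(inicio, periods=len(datos), freq='200ms', name='time')
print(datos.head())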
jzt5132/scikit-learn
benchmarks/bench_isotonic.py
268
3046
""" Benchmarks of isotonic regression performance. We generate a synthetic dataset of size 10^n, for n in [min, max], and examine the time taken to run isotonic regression over the dataset. The timings are then output to stdout, or visualized on a log-log scale with matplotlib. This alows the scaling of the algorithm with the problem size to be visualized and understood. """ from __future__ import print_function import numpy as np import gc from datetime import datetime from sklearn.isotonic import isotonic_regression from sklearn.utils.bench import total_seconds import matplotlib.pyplot as plt import argparse def generate_perturbed_logarithm_dataset(size): return np.random.randint(-50, 50, size=n) \ + 50. * np.log(1 + np.arange(n)) def generate_logistic_dataset(size): X = np.sort(np.random.normal(size=size)) return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X)) DATASET_GENERATORS = { 'perturbed_logarithm': generate_perturbed_logarithm_dataset, 'logistic': generate_logistic_dataset } def bench_isotonic_regression(Y): """ Runs a single iteration of isotonic regression on the input data, and reports the total time taken (in seconds). """ gc.collect() tstart = datetime.now() isotonic_regression(Y) delta = datetime.now() - tstart return total_seconds(delta) if __name__ == '__main__': parser = argparse.ArgumentParser( description="Isotonic Regression benchmark tool") parser.add_argument('--iterations', type=int, required=True, help="Number of iterations to average timings over " "for each problem size") parser.add_argument('--log_min_problem_size', type=int, required=True, help="Base 10 logarithm of the minimum problem size") parser.add_argument('--log_max_problem_size', type=int, required=True, help="Base 10 logarithm of the maximum problem size") parser.add_argument('--show_plot', action='store_true', help="Plot timing output with matplotlib") parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(), required=True) args = parser.parse_args() timings = [] for exponent in range(args.log_min_problem_size, args.log_max_problem_size): n = 10 ** exponent Y = DATASET_GENERATORS[args.dataset](n) time_per_iteration = \ [bench_isotonic_regression(Y) for i in range(args.iterations)] timing = (n, np.mean(time_per_iteration)) timings.append(timing) # If we're not plotting, dump the timing to stdout if not args.show_plot: print(n, np.mean(time_per_iteration)) if args.show_plot: plt.plot(*zip(*timings)) plt.title("Average time taken running isotonic regression") plt.xlabel('Number of observations') plt.ylabel('Time (s)') plt.axis('tight') plt.loglog() plt.show()
bsd-3-clause
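Editor's note (not part of the original record): each benchmark iteration above times one call to isotonic_regression. The sketch below shows that call directly on a noisy, roughly increasing sequence similar to the perturbed-logarithm generator; sizes are illustrative.

# Illustrative sketch only.
import numpy as np
from sklearn.isotonic import isotonic_regression

rng = np.random.RandomState(0)
y = 50.0 * np.log(1 + np.arange(1000)) + rng.randint(-50, 50, size=1000)
y_fit = isotonic_regression(y)       # non-decreasing least-squares fit to y
assert np.all(np.diff(y_fit) >= -1e-9)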
klonage/nlt-gcs
Lib/site-packages/scipy/cluster/hierarchy.py
53
94069
""" Function Reference ------------------ These functions cut hierarchical clusterings into flat clusterings or find the roots of the forest formed by a cut by providing the flat cluster ids of each observation. .. autosummary:: :toctree: generated/ fcluster fclusterdata leaders These are routines for agglomerative clustering. .. autosummary:: :toctree: generated/ linkage single complete average weighted centroid median ward These routines compute statistics on hierarchies. .. autosummary:: :toctree: generated/ cophenet from_mlab_linkage inconsistent maxinconsts maxdists maxRstat to_mlab_linkage Routines for visualizing flat clusters. .. autosummary:: :toctree: generated/ dendrogram These are data structures and routines for representing hierarchies as tree objects. .. autosummary:: :toctree: generated/ ClusterNode leaves_list to_tree These are predicates for checking the validity of linkage and inconsistency matrices as well as for checking isomorphism of two flat cluster assignments. .. autosummary:: :toctree: generated/ is_valid_im is_valid_linkage is_isomorphic is_monotonic correspond num_obs_linkage * MATLAB and MathWorks are registered trademarks of The MathWorks, Inc. * Mathematica is a registered trademark of The Wolfram Research, Inc. References ---------- .. [Sta07] "Statistics toolbox." API Reference Documentation. The MathWorks. http://www.mathworks.com/access/helpdesk/help/toolbox/stats/. Accessed October 1, 2007. .. [Mti07] "Hierarchical clustering." API Reference Documentation. The Wolfram Research, Inc. http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/HierarchicalClustering.html. Accessed October 1, 2007. .. [Gow69] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969. .. [War63] Ward Jr, JH. "Hierarchical grouping to optimize an objective function." Journal of the American Statistical Association. 58(301): pp. 236--44. 1963. .. [Joh66] Johnson, SC. "Hierarchical clustering schemes." Psychometrika. 32(2): pp. 241--54. 1966. .. [Sne62] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp. 855--60. 1962. .. [Bat95] Batagelj, V. "Comparing resemblance measures." Journal of Classification. 12: pp. 73--90. 1995. .. [Sok58] Sokal, RR and Michener, CD. "A statistical method for evaluating systematic relationships." Scientific Bulletins. 38(22): pp. 1409--38. 1958. .. [Ede79] Edelbrock, C. "Mixture model tests of hierarchical clustering algorithms: the problem of classifying everybody." Multivariate Behavioral Research. 14: pp. 367--84. 1979. .. [Jai88] Jain, A., and Dubes, R., "Algorithms for Clustering Data." Prentice-Hall. Englewood Cliffs, NJ. 1988. .. [Fis36] Fisher, RA "The use of multiple measurements in taxonomic problems." Annals of Eugenics, 7(2): 179-188. 1936 Copyright Notice ---------------- Copyright (C) Damian Eads, 2007-2008. New BSD License. """ # hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com) # # Author: Damian Eads # Date: September 22, 2007 # # Copyright (c) 2007, 2008, Damian Eads # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the # following disclaimer. 
# - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # - Neither the name of the author nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import types import numpy as np # TODO: _hierarchy_wrap and spatial have not been ported yet. #import _hierarchy_wrap #import scipy.spatial.distance as distance _cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2, 'weighted': 6} _cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5} _cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union( set(_cpy_euclid_methods.keys())) try: import warnings def _warning(s): warnings.warn('scipy.cluster: %s' % s, stacklevel=3) except: def _warning(s): print ('[WARNING] scipy.cluster: %s' % s) def _copy_array_if_base_present(a): """ Copies the array if its base points to a parent array. """ if a.base is not None: return a.copy() elif np.issubsctype(a, np.float32): return np.array(a, dtype=np.double) else: return a def _copy_arrays_if_base_present(T): """ Accepts a tuple of arrays T. Copies the array T[i] if its base array points to an actual array. Otherwise, the reference is just copied. This is useful if the arrays are being passed to a C function that does not do proper striding. """ l = [_copy_array_if_base_present(a) for a in T] return l def _randdm(pnts): """ Generates a random distance matrix stored in condensed form. A pnts * (pnts - 1) / 2 sized vector is returned. """ if pnts >= 2: D = np.random.rand(pnts * (pnts - 1) / 2) else: raise ValueError("The number of points in the distance matrix must be at least 2.") return D def single(y): """ Performs single/min/nearest linkage on the condensed distance matrix ``y``. See ``linkage`` for more information on the return structure and algorithm. :Parameters: y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. :Returns: Z : ndarray The linkage matrix. :SeeAlso: - linkage: for advanced creation of hierarchical clusterings. """ return linkage(y, method='single', metric='euclidean') def complete(y): """ Performs complete complete/max/farthest point linkage on the condensed distance matrix ``y``. See ``linkage`` for more information on the return structure and algorithm. :Parameters: y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. :Returns: Z : ndarray A linkage matrix containing the hierarchical clustering. See the ``linkage`` function documentation for more information on its structure. 
""" return linkage(y, method='complete', metric='euclidean') def average(y): """ Performs average/UPGMA linkage on the condensed distance matrix ``y``. See ``linkage`` for more information on the return structure and algorithm. :Parameters: y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. :Returns: Z : ndarray A linkage matrix containing the hierarchical clustering. See the ``linkage`` function documentation for more information on its structure. :SeeAlso: - linkage: for advanced creation of hierarchical clusterings. """ return linkage(y, method='average', metric='euclidean') def weighted(y): """ Performs weighted/WPGMA linkage on the condensed distance matrix ``y``. See ``linkage`` for more information on the return structure and algorithm. :Parameters: y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. :Returns: Z : ndarray A linkage matrix containing the hierarchical clustering. See the ``linkage`` function documentation for more information on its structure. :SeeAlso: - linkage: for advanced creation of hierarchical clusterings. """ return linkage(y, method='weighted', metric='euclidean') def centroid(y): """ Performs centroid/UPGMC linkage. See ``linkage`` for more information on the return structure and algorithm. The following are common calling conventions: 1. ``Z = centroid(y)`` Performs centroid/UPGMC linkage on the condensed distance matrix ``y``. See ``linkage`` for more information on the return structure and algorithm. 2. ``Z = centroid(X)`` Performs centroid/UPGMC linkage on the observation matrix ``X`` using Euclidean distance as the distance metric. See ``linkage`` for more information on the return structure and algorithm. :Parameters: Q : ndarray A condensed or redundant distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of m observation vectors in n dimensions may be passed as a m by n array. :Returns: Z : ndarray A linkage matrix containing the hierarchical clustering. See the ``linkage`` function documentation for more information on its structure. :SeeAlso: - linkage: for advanced creation of hierarchical clusterings. """ return linkage(y, method='centroid', metric='euclidean') def median(y): """ Performs median/WPGMC linkage. See ``linkage`` for more information on the return structure and algorithm. The following are common calling conventions: 1. ``Z = median(y)`` Performs median/WPGMC linkage on the condensed distance matrix ``y``. See ``linkage`` for more information on the return structure and algorithm. 2. ``Z = median(X)`` Performs median/WPGMC linkage on the observation matrix ``X`` using Euclidean distance as the distance metric. See linkage for more information on the return structure and algorithm. :Parameters: Q : ndarray A condensed or redundant distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of m observation vectors in n dimensions may be passed as a m by n array. :Returns: - Z : ndarray The hierarchical clustering encoded as a linkage matrix. :SeeAlso: - linkage: for advanced creation of hierarchical clusterings. """ return linkage(y, method='median', metric='euclidean') def ward(y): """ Performs Ward's linkage on a condensed or redundant distance matrix. 
See linkage for more information on the return structure and algorithm. The following are common calling conventions: 1. ``Z = ward(y)`` Performs Ward's linkage on the condensed distance matrix ``Z``. See linkage for more information on the return structure and algorithm. 2. ``Z = ward(X)`` Performs Ward's linkage on the observation matrix ``X`` using Euclidean distance as the distance metric. See linkage for more information on the return structure and algorithm. :Parameters: Q : ndarray A condensed or redundant distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of m observation vectors in n dimensions may be passed as a m by n array. :Returns: - Z : ndarray The hierarchical clustering encoded as a linkage matrix. :SeeAlso: - linkage: for advanced creation of hierarchical clusterings. """ return linkage(y, method='ward', metric='euclidean') def linkage(y, method='single', metric='euclidean'): """ Performs hierarchical/agglomerative clustering on the condensed distance matrix y. y must be a :math:`{n \\choose 2}` sized vector where n is the number of original observations paired in the distance matrix. The behavior of this function is very similar to the MATLAB(TM) linkage function. A 4 by :math:`(n-1)` matrix ``Z`` is returned. At the :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A cluster with an index less than :math:`n` corresponds to one of the :math:`n` original observations. The distance between clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The fourth value ``Z[i, 3]`` represents the number of original observations in the newly formed cluster. The following linkage methods are used to compute the distance :math:`d(s, t)` between two clusters :math:`s` and :math:`t`. The algorithm begins with a forest of clusters that have yet to be used in the hierarchy being formed. When two clusters :math:`s` and :math:`t` from this forest are combined into a single cluster :math:`u`, :math:`s` and :math:`t` are removed from the forest, and :math:`u` is added to the forest. When only one cluster remains in the forest, the algorithm stops, and this cluster becomes the root. A distance matrix is maintained at each iteration. The ``d[i,j]`` entry corresponds to the distance between cluster :math:`i` and :math:`j` in the original forest. At each iteration, the algorithm must update the distance matrix to reflect the distance of the newly formed cluster u with the remaining clusters in the forest. Suppose there are :math:`|u|` original observations :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in cluster :math:`v`. Recall :math:`s` and :math:`t` are combined to form cluster :math:`u`. Let :math:`v` be any remaining cluster in the forest that is not :math:`u`. The following are methods for calculating the distance between the newly formed cluster :math:`u` and each :math:`v`. * method='single' assigns .. math:: d(u,v) = \\min(dist(u[i],v[j])) for all points :math:`i` in cluster :math:`u` and :math:`j` in cluster :math:`v`. This is also known as the Nearest Point Algorithm. * method='complete' assigns .. math:: d(u, v) = \\max(dist(u[i],v[j])) for all points :math:`i` in cluster u and :math:`j` in cluster :math:`v`. This is also known by the Farthest Point Algorithm or Voor Hees Algorithm. 
* method='average' assigns .. math:: d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])} {(|u|*|v|)} for all points :math:`i` and :math:`j` where :math:`|u|` and :math:`|v|` are the cardinalities of clusters :math:`u` and :math:`v`, respectively. This is also called the UPGMA algorithm. This is called UPGMA. * method='weighted' assigns .. math:: d(u,v) = (dist(s,v) + dist(t,v))/2 where cluster u was formed with cluster s and t and v is a remaining cluster in the forest. (also called WPGMA) * method='centroid' assigns .. math:: dist(s,t) = ||c_s-c_t||_2 where :math:`c_s` and :math:`c_t` are the centroids of clusters :math:`s` and :math:`t`, respectively. When two clusters :math:`s` and :math:`t` are combined into a new cluster :math:`u`, the new centroid is computed over all the original objects in clusters :math:`s` and :math:`t`. The distance then becomes the Euclidean distance between the centroid of :math:`u` and the centroid of a remaining cluster :math:`v` in the forest. This is also known as the UPGMC algorithm. * method='median' assigns math:`d(s,t)` like the ``centroid`` method. When two clusters :math:`s` and :math:`t` are combined into a new cluster :math:`u`, the average of centroids s and t give the new centroid :math:`u`. This is also known as the WPGMC algorithm. * method='ward' uses the Ward variance minimization algorithm. The new entry :math:`d(u,v)` is computed as follows, .. math:: d(u,v) = \\sqrt{\\frac{|v|+|s|} {T}d(v,s)^2 + \\frac{|v|+|t|} {T}d(v,t)^2 + \\frac{|v|} {T}d(s,t)^2} where :math:`u` is the newly joined cluster consisting of clusters :math:`s` and :math:`t`, :math:`v` is an unused cluster in the forest, :math:`T=|v|+|s|+|t|`, and :math:`|*|` is the cardinality of its argument. This is also known as the incremental algorithm. Warning: When the minimum distance pair in the forest is chosen, there may be two or more pairs with the same minimum distance. This implementation may chose a different minimum than the MATLAB(TM) version. :Parameters: - y : ndarray A condensed or redundant distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of :math:`m` observation vectors in n dimensions may be passed as an :math:`m` by :math:`n` array. - method : string The linkage algorithm to use. See the ``Linkage Methods`` section below for full descriptions. - metric : string The distance metric to use. See the ``distance.pdist`` function for a list of valid distance metrics. :Returns: - Z : ndarray The hierarchical clustering encoded as a linkage matrix. """ if not isinstance(method, str): raise TypeError("Argument 'method' must be a string.") y = _convert_to_double(np.asarray(y, order='c')) s = y.shape if len(s) == 1: distance.is_valid_y(y, throw=True, name='y') d = distance.num_obs_y(y) if method not in _cpy_non_euclid_methods.keys(): raise ValueError("Valid methods when the raw observations are omitted are 'single', 'complete', 'weighted', and 'average'.") # Since the C code does not support striding using strides. 
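        # The dimensions are used instead.  Make a contiguous copy of the
        # condensed distance matrix if needed, allocate the (d - 1) by 4
        # linkage matrix, and let the C implementation fill it in.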
[y] = _copy_arrays_if_base_present([y]) Z = np.zeros((d - 1, 4)) _hierarchy_wrap.linkage_wrap(y, Z, int(d), \ int(_cpy_non_euclid_methods[method])) elif len(s) == 2: X = y n = s[0] m = s[1] if method not in _cpy_linkage_methods: raise ValueError('Invalid method: %s' % method) if method in _cpy_non_euclid_methods.keys(): dm = distance.pdist(X, metric) Z = np.zeros((n - 1, 4)) _hierarchy_wrap.linkage_wrap(dm, Z, n, \ int(_cpy_non_euclid_methods[method])) elif method in _cpy_euclid_methods.keys(): if metric != 'euclidean': raise ValueError('Method %s requires the distance metric to be euclidean' % s) dm = distance.pdist(X, metric) Z = np.zeros((n - 1, 4)) _hierarchy_wrap.linkage_euclid_wrap(dm, Z, X, m, n, int(_cpy_euclid_methods[method])) return Z class ClusterNode: """ A tree node class for representing a cluster. Leaf nodes correspond to original observations, while non-leaf nodes correspond to non-singleton clusters. The to_tree function converts a matrix returned by the linkage function into an easy-to-use tree representation. :SeeAlso: - to_tree: for converting a linkage matrix ``Z`` into a tree object. """ def __init__(self, id, left=None, right=None, dist=0, count=1): if id < 0: raise ValueError('The id must be non-negative.') if dist < 0: raise ValueError('The distance must be non-negative.') if (left is None and right is not None) or \ (left is not None and right is None): raise ValueError('Only full or proper binary trees are permitted. This node has one child.') if count < 1: raise ValueError('A cluster must contain at least one original observation.') self.id = id self.left = left self.right = right self.dist = dist if self.left is None: self.count = count else: self.count = left.count + right.count def get_id(self): r""" The identifier of the target node. For :math:`0 \leq i < n`, :math:`i` corresponds to original observation :math:`i`. For :math:`n \leq i` < :math:`2n-1`, :math:`i` corresponds to non-singleton cluster formed at iteration :math:`i-n`. :Returns: id : int The identifier of the target node. """ return self.id def get_count(self): """ The number of leaf nodes (original observations) belonging to the cluster node nd. If the target node is a leaf, 1 is returned. :Returns: c : int The number of leaf nodes below the target node. """ return self.count def get_left(self): """ Returns a reference to the left child tree object. If the node is a leaf, None is returned. :Returns: left : ClusterNode The left child of the target node. """ return self.left def get_right(self): """ Returns a reference to the right child tree object. If the node is a leaf, None is returned. :Returns: right : ClusterNode The left child of the target node. """ return self.right def is_leaf(self): """ Returns True iff the target node is a leaf. :Returns: leafness : bool True if the target node is a leaf node. """ return self.left is None def pre_order(self, func=(lambda x: x.id)): """ Performs preorder traversal without recursive function calls. When a leaf node is first encountered, ``func`` is called with the leaf node as its argument, and its result is appended to the list. For example, the statement: ids = root.pre_order(lambda x: x.id) returns a list of the node ids corresponding to the leaf nodes of the tree as they appear from left to right. :Parameters: - func : function Applied to each leaf ClusterNode object in the pre-order traversal. Given the i'th leaf node in the pre-order traversal ``n[i]``, the result of func(n[i]) is stored in L[i]. 
If not provided, the index of the original observation to which the node corresponds is used. :Returns: - L : list The pre-order traversal. """ # Do a preorder traversal, caching the result. To avoid having to do # recursion, we'll store the previous index we've visited in a vector. n = self.count curNode = [None] * (2 * n) lvisited = np.zeros((2 * n,), dtype=bool) rvisited = np.zeros((2 * n,), dtype=bool) curNode[0] = self k = 0 preorder = [] while k >= 0: nd = curNode[k] ndid = nd.id if nd.is_leaf(): preorder.append(func(nd)) k = k - 1 else: if not lvisited[ndid]: curNode[k + 1] = nd.left lvisited[ndid] = True k = k + 1 elif not rvisited[ndid]: curNode[k + 1] = nd.right rvisited[ndid] = True k = k + 1 # If we've visited the left and right of this non-leaf # node already, go up in the tree. else: k = k - 1 return preorder _cnode_bare = ClusterNode(0) _cnode_type = type(ClusterNode) def to_tree(Z, rd=False): """ Converts a hierarchical clustering encoded in the matrix ``Z`` (by linkage) into an easy-to-use tree object. The reference r to the root ClusterNode object is returned. Each ClusterNode object has a left, right, dist, id, and count attribute. The left and right attributes point to ClusterNode objects that were combined to generate the cluster. If both are None then the ClusterNode object is a leaf node, its count must be 1, and its distance is meaningless but set to 0. Note: This function is provided for the convenience of the library user. ClusterNodes are not used as input to any of the functions in this library. :Parameters: - Z : ndarray The linkage matrix in proper form (see the ``linkage`` function documentation). - r : bool When ``False``, a reference to the root ClusterNode object is returned. Otherwise, a tuple (r,d) is returned. ``r`` is a reference to the root node while ``d`` is a dictionary mapping cluster ids to ClusterNode references. If a cluster id is less than n, then it corresponds to a singleton cluster (leaf node). See ``linkage`` for more information on the assignment of cluster ids to clusters. :Returns: - L : list The pre-order traversal. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') # The number of original objects is equal to the number of rows minus # 1. n = Z.shape[0] + 1 # Create a list full of None's to store the node objects d = [None] * (n*2-1) # Create the nodes corresponding to the n original objects. for i in xrange(0, n): d[i] = ClusterNode(i) nd = None for i in xrange(0, n - 1): fi = int(Z[i, 0]) fj = int(Z[i, 1]) if fi > i + n: raise ValueError('Corrupt matrix Z. Index to derivative cluster is used before it is formed. See row %d, column 0' % fi) if fj > i + n: raise ValueError('Corrupt matrix Z. Index to derivative cluster is used before it is formed. See row %d, column 1' % fj) nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2]) # ^ id ^ left ^ right ^ dist if Z[i,3] != nd.count: raise ValueError('Corrupt matrix Z. The count Z[%d,3] is incorrect.' % i) d[n + i] = nd if rd: return (nd, d) else: return nd def _convert_to_bool(X): if X.dtype != np.bool: X = np.bool_(X) if not X.flags.contiguous: X = X.copy() return X def _convert_to_double(X): if X.dtype != np.double: X = np.double(X) if not X.flags.contiguous: X = X.copy() return X def cophenet(Z, Y=None): """ Calculates the cophenetic distances between each observation in the hierarchical clustering defined by the linkage ``Z``. 
Suppose ``p`` and ``q`` are original observations in disjoint clusters ``s`` and ``t``, respectively and ``s`` and ``t`` are joined by a direct parent cluster ``u``. The cophenetic distance between observations ``i`` and ``j`` is simply the distance between clusters ``s`` and ``t``. :Parameters: - Z : ndarray The hierarchical clustering encoded as an array (see ``linkage`` function). - Y : ndarray (optional) Calculates the cophenetic correlation coefficient ``c`` of a hierarchical clustering defined by the linkage matrix ``Z`` of a set of :math:`n` observations in :math:`m` dimensions. ``Y`` is the condensed distance matrix from which ``Z`` was generated. :Returns: (c, {d}) - c : ndarray The cophentic correlation distance (if ``y`` is passed). - d : ndarray The cophenetic distance matrix in condensed form. The :math:`ij` th entry is the cophenetic distance between original observations :math:`i` and :math:`j`. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') Zs = Z.shape n = Zs[0] + 1 zz = np.zeros((n*(n-1)/2,), dtype=np.double) # Since the C code does not support striding using strides. # The dimensions are used instead. Z = _convert_to_double(Z) _hierarchy_wrap.cophenetic_distances_wrap(Z, zz, int(n)) if Y is None: return zz Y = np.asarray(Y, order='c') Ys = Y.shape distance.is_valid_y(Y, throw=True, name='Y') z = zz.mean() y = Y.mean() Yy = Y - y Zz = zz - z #print Yy.shape, Zz.shape numerator = (Yy * Zz) denomA = Yy ** 2 denomB = Zz ** 2 c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum())) #print c, numerator.sum() return (c, zz) def inconsistent(Z, d=2): r""" Calculates inconsistency statistics on a linkage. Note: This function behaves similarly to the MATLAB(TM) inconsistent function. :Parameters: - d : int The number of links up to ``d`` levels below each non-singleton cluster - Z : ndarray The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical clustering). See ``linkage`` documentation for more information on its form. :Returns: - R : ndarray A :math:`(n-1)` by 5 matrix where the ``i``'th row contains the link statistics for the non-singleton cluster ``i``. The link statistics are computed over the link heights for links :math:`d` levels below the cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard deviation of the link heights, respectively; ``R[i,2]`` is the number of links included in the calculation; and ``R[i,3]`` is the inconsistency coefficient, .. math:: \frac{\mathtt{Z[i,2]}-\mathtt{R[i,0]}} {R[i,1]}. """ Z = np.asarray(Z, order='c') Zs = Z.shape is_valid_linkage(Z, throw=True, name='Z') if (not d == np.floor(d)) or d < 0: raise ValueError('The second argument d must be a nonnegative integer value.') # if d == 0: # d = 1 # Since the C code does not support striding using strides. # The dimensions are used instead. [Z] = _copy_arrays_if_base_present([Z]) n = Zs[0] + 1 R = np.zeros((n - 1, 4), dtype=np.double) _hierarchy_wrap.inconsistent_wrap(Z, R, int(n), int(d)); return R def from_mlab_linkage(Z): """ Converts a linkage matrix generated by MATLAB(TM) to a new linkage matrix compatible with this module. The conversion does two things: * the indices are converted from ``1..N`` to ``0..(N-1)`` form, and * a fourth column Z[:,3] is added where Z[i,3] is represents the number of original observations (leaves) in the non-singleton cluster i. This function is useful when loading in linkages from legacy data files generated by MATLAB. 
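    For example, with three observations the MATLAB(TM) row ``[1, 2, 0.3]``
    (clusters 1 and 2 merged at height 0.3, using 1..N indexing) becomes
    ``[0, 1, 0.3, 2]`` here: the indices are shifted to 0..(N-1) and a
    fourth column holding the leaf count of the new cluster is appended.
    A minimal sketch (it assumes the ``_hierarchy_wrap`` extension has
    been ported)::

      import numpy as np
      ZM = np.array([[1.0, 2.0, 0.3],
                     [3.0, 4.0, 0.7]])    # MATLAB-style linkage, 1..N ids
      Z = from_mlab_linkage(ZM)           # -> [[0, 1, 0.3, 2], [2, 3, 0.7, 3]]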
:Arguments: - Z : ndarray A linkage matrix generated by MATLAB(TM) :Returns: - ZS : ndarray A linkage matrix compatible with this library. """ Z = np.asarray(Z, dtype=np.double, order='c') Zs = Z.shape # If it's empty, return it. if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): return Z.copy() if len(Zs) != 2: raise ValueError("The linkage array must be rectangular.") # If it contains no rows, return it. if Zs[0] == 0: return Z.copy() Zpart = Z.copy() if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]: raise ValueError('The format of the indices is not 1..N'); Zpart[:, 0:2] -= 1.0 CS = np.zeros((Zs[0],), dtype=np.double) _hierarchy_wrap.calculate_cluster_sizes_wrap(Zpart, CS, int(Zs[0]) + 1) return np.hstack([Zpart, CS.reshape(Zs[0], 1)]) def to_mlab_linkage(Z): """ Converts a linkage matrix ``Z`` generated by the linkage function of this module to a MATLAB(TM) compatible one. The return linkage matrix has the last column removed and the cluster indices are converted to ``1..N`` indexing. :Arguments: - Z : ndarray A linkage matrix generated by this library. :Returns: - ZM : ndarray A linkage matrix compatible with MATLAB(TM)'s hierarchical clustering functions. """ Z = np.asarray(Z, order='c', dtype=np.double) Zs = Z.shape if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): return Z.copy() is_valid_linkage(Z, throw=True, name='Z') ZP = Z[:, 0:3].copy() ZP[:,0:2] += 1.0 return ZP def is_monotonic(Z): """ Returns ``True`` if the linkage passed is monotonic. The linkage is monotonic if for every cluster :math:`s` and :math:`t` joined, the distance between them is no less than the distance between any previously joined clusters. :Arguments: - Z : ndarray The linkage matrix to check for monotonicity. :Returns: - b : bool A boolean indicating whether the linkage is monotonic. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') # We expect the i'th value to be greater than its successor. return (Z[1:,2]>=Z[:-1,2]).all() def is_valid_im(R, warning=False, throw=False, name=None): """ Returns True if the inconsistency matrix passed is valid. It must be a :math:`n` by 4 numpy array of doubles. The standard deviations ``R[:,1]`` must be nonnegative. The link counts ``R[:,2]`` must be positive and no greater than :math:`n-1`. :Arguments: - R : ndarray The inconsistency matrix to check for validity. - warning : bool When ``True``, issues a Python warning if the linkage matrix passed is invalid. - throw : bool When ``True``, throws a Python exception if the linkage matrix passed is invalid. - name : string This string refers to the variable name of the invalid linkage matrix. :Returns: - b : bool True iff the inconsistency matrix is valid. """ R = np.asarray(R, order='c') valid = True try: if type(R) != np.ndarray: if name: raise TypeError('Variable \'%s\' passed as inconsistency matrix is not a numpy array.' % name) else: raise TypeError('Variable passed as inconsistency matrix is not a numpy array.') if R.dtype != np.double: if name: raise TypeError('Inconsistency matrix \'%s\' must contain doubles (double).' % name) else: raise TypeError('Inconsistency matrix must contain doubles (double).') if len(R.shape) != 2: if name: raise ValueError('Inconsistency matrix \'%s\' must have shape=2 (i.e. be two-dimensional).' % name) else: raise ValueError('Inconsistency matrix must have shape=2 (i.e. be two-dimensional).') if R.shape[1] != 4: if name: raise ValueError('Inconsistency matrix \'%s\' must have 4 columns.' 
% name) else: raise ValueError('Inconsistency matrix must have 4 columns.') if R.shape[0] < 1: if name: raise ValueError('Inconsistency matrix \'%s\' must have at least one row.' % name) else: raise ValueError('Inconsistency matrix must have at least one row.') if (R[:, 0] < 0).any(): if name: raise ValueError('Inconsistency matrix \'%s\' contains negative link height means.' % name) else: raise ValueError('Inconsistency matrix contains negative link height means.') if (R[:, 1] < 0).any(): if name: raise ValueError('Inconsistency matrix \'%s\' contains negative link height standard deviations.' % name) else: raise ValueError('Inconsistency matrix contains negative link height standard deviations.') if (R[:, 2] < 0).any(): if name: raise ValueError('Inconsistency matrix \'%s\' contains negative link counts.' % name) else: raise ValueError('Inconsistency matrix contains negative link counts.') except Exception, e: if throw: raise if warning: _warning(str(e)) valid = False return valid def is_valid_linkage(Z, warning=False, throw=False, name=None): r""" Checks the validity of a linkage matrix. A linkage matrix is valid if it is a two dimensional nd-array (type double) with :math:`n` rows and 4 columns. The first two columns must contain indices between 0 and :math:`2n-1`. For a given row ``i``, :math:`0 \leq \mathtt{Z[i,0]} \leq i+n-1` and :math:`0 \leq Z[i,1] \leq i+n-1` (i.e. a cluster cannot join another cluster unless the cluster being joined has been generated.) :Arguments: - warning : bool When ``True``, issues a Python warning if the linkage matrix passed is invalid. - throw : bool When ``True``, throws a Python exception if the linkage matrix passed is invalid. - name : string This string refers to the variable name of the invalid linkage matrix. :Returns: - b : bool True iff the inconsistency matrix is valid. """ Z = np.asarray(Z, order='c') valid = True try: if type(Z) != np.ndarray: if name: raise TypeError('\'%s\' passed as a linkage is not a valid array.' % name) else: raise TypeError('Variable is not a valid array.') if Z.dtype != np.double: if name: raise TypeError('Linkage matrix \'%s\' must contain doubles.' % name) else: raise TypeError('Linkage matrix must contain doubles.') if len(Z.shape) != 2: if name: raise ValueError('Linkage matrix \'%s\' must have shape=2 (i.e. be two-dimensional).' % name) else: raise ValueError('Linkage matrix must have shape=2 (i.e. be two-dimensional).') if Z.shape[1] != 4: if name: raise ValueError('Linkage matrix \'%s\' must have 4 columns.' % name) else: raise ValueError('Linkage matrix must have 4 columns.') if Z.shape[0] == 0: raise ValueError('Linkage must be computed on at least two observations.') n = Z.shape[0] if n > 1: if ((Z[:,0] < 0).any() or (Z[:,1] < 0).any()): if name: raise ValueError('Linkage \'%s\' contains negative indices.' % name) else: raise ValueError('Linkage contains negative indices.') if (Z[:, 2] < 0).any(): if name: raise ValueError('Linkage \'%s\' contains negative distances.' % name) else: raise ValueError('Linkage contains negative distances.') if (Z[:, 3] < 0).any(): if name: raise ValueError('Linkage \'%s\' contains negative counts.' % name) else: raise ValueError('Linkage contains negative counts.') if _check_hierarchy_uses_cluster_before_formed(Z): if name: raise ValueError('Linkage \'%s\' uses non-singleton cluster before its formed.' 
% name) else: raise ValueError('Linkage uses non-singleton cluster before its formed.') if _check_hierarchy_uses_cluster_more_than_once(Z): if name: raise ValueError('Linkage \'%s\' uses the same cluster more than once.' % name) else: raise ValueError('Linkage uses the same cluster more than once.') # if _check_hierarchy_not_all_clusters_used(Z): # if name: # raise ValueError('Linkage \'%s\' does not use all clusters.' % name) # else: # raise ValueError('Linkage does not use all clusters.') except Exception, e: if throw: raise if warning: _warning(str(e)) valid = False return valid def _check_hierarchy_uses_cluster_before_formed(Z): n = Z.shape[0] + 1 for i in xrange(0, n - 1): if Z[i, 0] >= n + i or Z[i, 1] >= n + i: return True return False def _check_hierarchy_uses_cluster_more_than_once(Z): n = Z.shape[0] + 1 chosen = set([]) for i in xrange(0, n - 1): if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]: return True chosen.add(Z[i, 0]) chosen.add(Z[i, 1]) return False def _check_hierarchy_not_all_clusters_used(Z): n = Z.shape[0] + 1 chosen = set([]) for i in xrange(0, n - 1): chosen.add(int(Z[i, 0])) chosen.add(int(Z[i, 1])) must_chosen = set(range(0, 2 * n - 2)) return len(must_chosen.difference(chosen)) > 0 def num_obs_linkage(Z): """ Returns the number of original observations of the linkage matrix passed. :Arguments: - Z : ndarray The linkage matrix on which to perform the operation. :Returns: - n : int The number of original observations in the linkage. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') return (Z.shape[0] + 1) def correspond(Z, Y): """ Checks if a linkage matrix ``Z`` and condensed distance matrix ``Y`` could possibly correspond to one another. They must have the same number of original observations for the check to succeed. This function is useful as a sanity check in algorithms that make extensive use of linkage and distance matrices that must correspond to the same set of original observations. :Arguments: - Z : ndarray The linkage matrix to check for correspondance. - Y : ndarray The condensed distance matrix to check for correspondance. :Returns: - b : bool A boolean indicating whether the linkage matrix and distance matrix could possibly correspond to one another. """ is_valid_linkage(Z, throw=True) distance.is_valid_y(Y, throw=True) Z = np.asarray(Z, order='c') Y = np.asarray(Y, order='c') return distance.num_obs_y(Y) == num_obs_linkage(Z) def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None): """ Forms flat clusters from the hierarchical clustering defined by the linkage matrix ``Z``. Parameters ---------- Z : ndarray The hierarchical clustering encoded with the matrix returned by the `linkage` function. t : float The threshold to apply when forming flat clusters. criterion : str, optional The criterion to use in forming flat clusters. This can be any of the following values: 'inconsistent': If a cluster node and all its descendants have an inconsistent value less than or equal to ``t`` then all its leaf descendants belong to the same flat cluster. When no non-singleton cluster meets this criterion, every node is assigned to its own cluster. (Default) 'distance': Forms flat clusters so that the original observations in each flat cluster have no greater a cophenetic distance than ``t``. 'maxclust': Finds a minimum threshold ``r`` so that the cophenetic distance between any two original observations in the same flat cluster is no more than ``r`` and no more than ``t`` flat clusters are formed. 
'monocrit': Forms a flat cluster from a cluster node c with index i when ``monocrit[j] <= t``. For example, to threshold on the maximum mean distance as computed in the inconsistency matrix R with a threshold of 0.8 do:: MR = maxRstat(Z, R, 3) cluster(Z, t=0.8, criterion='monocrit', monocrit=MR) 'maxclust_monocrit': Forms a flat cluster from a non-singleton cluster node ``c`` when ``monocrit[i] <= r`` for all cluster indices ``i`` below and including ``c``. ``r`` is minimized such that no more than ``t`` flat clusters are formed. monocrit must be monotonic. For example, to minimize the threshold t on maximum inconsistency values so that no more than 3 flat clusters are formed, do:: MI = maxinconsts(Z, R) cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI) depth : int, optional The maximum depth to perform the inconsistency calculation. It has no meaning for the other criteria. Default is 2. R : ndarray, optional The inconsistency matrix to use for the 'inconsistent' criterion. This matrix is computed if not provided. monocrit : ndarray, optional An array of length n-1. ``monocrit[i]`` is the statistics upon which non-singleton i is thresholded. The monocrit vector must be monotonic, i.e. given a node c with index i, for all node indices j corresponding to nodes below c, ``monocrit[i] >= monocrit[j]``. Returns ------- fcluster : ndarray An array of length n. T[i] is the flat cluster number to which original observation i belongs. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 T = np.zeros((n,), dtype='i') # Since the C code does not support striding using strides. # The dimensions are used instead. [Z] = _copy_arrays_if_base_present([Z]) if criterion == 'inconsistent': if R is None: R = inconsistent(Z, depth) else: R = np.asarray(R, order='c') is_valid_im(R, throw=True, name='R') # Since the C code does not support striding using strides. # The dimensions are used instead. [R] = _copy_arrays_if_base_present([R]) _hierarchy_wrap.cluster_in_wrap(Z, R, T, float(t), int(n)) elif criterion == 'distance': _hierarchy_wrap.cluster_dist_wrap(Z, T, float(t), int(n)) elif criterion == 'maxclust': _hierarchy_wrap.cluster_maxclust_dist_wrap(Z, T, int(n), int(t)) elif criterion == 'monocrit': [monocrit] = _copy_arrays_if_base_present([monocrit]) _hierarchy_wrap.cluster_monocrit_wrap(Z, monocrit, T, float(t), int(n)) elif criterion == 'maxclust_monocrit': [monocrit] = _copy_arrays_if_base_present([monocrit]) _hierarchy_wrap.cluster_maxclust_monocrit_wrap(Z, monocrit, T, int(n), int(t)) else: raise ValueError('Invalid cluster formation criterion: %s' % str(criterion)) return T def fclusterdata(X, t, criterion='inconsistent', \ metric='euclidean', depth=2, method='single', R=None): """ Cluster observation data using a given metric. Clusters the original observations in the n-by-m data matrix X (n observations in m dimensions), using the euclidean distance metric to calculate distances between original observations, performs hierarchical clustering using the single linkage algorithm, and forms flat clusters using the inconsistency method with `t` as the cut-off threshold. A one-dimensional array T of length n is returned. T[i] is the index of the flat cluster to which the original observation i belongs. Parameters ---------- X : ndarray n by m data matrix with n observations in m dimensions. t : float The threshold to apply when forming flat clusters. criterion : str, optional Specifies the criterion for forming flat clusters. 
Valid values are 'inconsistent' (default), 'distance', or 'maxclust' cluster formation algorithms. See `fcluster` for descriptions. method : str, optional The linkage method to use (single, complete, average, weighted, median centroid, ward). See `linkage` for more information. Default is "single". metric : str, optional The distance metric for calculating pairwise distances. See `distance.pdist` for descriptions and linkage to verify compatibility with the linkage method. t : double, optional The cut-off threshold for the cluster function or the maximum number of clusters (criterion='maxclust'). depth : int, optional The maximum depth for the inconsistency calculation. See `inconsistent` for more information. R : ndarray, optional The inconsistency matrix. It will be computed if necessary if it is not passed. Returns ------- T : ndarray A vector of length n. T[i] is the flat cluster number to which original observation i belongs. Notes ----- This function is similar to the MATLAB function clusterdata. """ X = np.asarray(X, order='c', dtype=np.double) if type(X) != np.ndarray or len(X.shape) != 2: raise TypeError('The observation matrix X must be an n by m numpy array.') Y = distance.pdist(X, metric=metric) Z = linkage(Y, method=method) if R is None: R = inconsistent(Z, d=depth) else: R = np.asarray(R, order='c') T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t) return T def leaves_list(Z): """ Returns a list of leaf node ids (corresponding to observation vector index) as they appear in the tree from left to right. Z is a linkage matrix. :Arguments: - Z : ndarray The hierarchical clustering encoded as a matrix. See ``linkage`` for more information. :Returns: - L : ndarray The list of leaf node ids. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 ML = np.zeros((n,), dtype='i') [Z] = _copy_arrays_if_base_present([Z]) _hierarchy_wrap.prelist_wrap(Z, ML, int(n)) return ML # Let's do a conditional import. If matplotlib is not available, try: import matplotlib try: import matplotlib.pylab import matplotlib.patches except RuntimeError, e: # importing matplotlib.pylab can fail with a RuntimeError if installed # but the graphic engine cannot be initialized (for example without X) raise ImportError("Could not import matplotib (error was %s)" % str(e)) #import matplotlib.collections _mpl = True # Maps number of leaves to text size. # # p <= 20, size="12" # 20 < p <= 30, size="10" # 30 < p <= 50, size="8" # 50 < p <= np.inf, size="6" _dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5} _drotation = {20: 0, 40: 45, np.inf: 90} _dtextsortedkeys = list(_dtextsizes.keys()) _dtextsortedkeys.sort() _drotationsortedkeys = list(_drotation.keys()) _drotationsortedkeys.sort() def _remove_dups(L): """ Removes duplicates AND preserves the original order of the elements. The set class is not guaranteed to do this. 
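        For example (a doctest-style sketch)::

          _remove_dups([2, 1, 2, 3, 1])   # -> [2, 1, 3]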
""" seen_before = set([]) L2 = [] for i in L: if i not in seen_before: seen_before.add(i) L2.append(i) return L2 def _get_tick_text_size(p): for k in _dtextsortedkeys: if p <= k: return _dtextsizes[k] def _get_tick_rotation(p): for k in _drotationsortedkeys: if p <= k: return _drotation[k] def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation, no_labels, color_list, leaf_font_size=None, leaf_rotation=None, contraction_marks=None): axis = matplotlib.pylab.gca() # Independent variable plot width ivw = len(ivl) * 10 # Depenendent variable plot height dvw = mh + mh * 0.05 ivticks = np.arange(5, len(ivl)*10+5, 10) if orientation == 'top': axis.set_ylim([0, dvw]) axis.set_xlim([0, ivw]) xlines = icoords ylines = dcoords if no_labels: axis.set_xticks([]) axis.set_xticklabels([]) else: axis.set_xticks(ivticks) axis.set_xticklabels(ivl) axis.xaxis.set_ticks_position('bottom') lbls=axis.get_xticklabels() if leaf_rotation: matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation) else: matplotlib.pylab.setp(lbls, 'rotation', float(_get_tick_rotation(len(ivl)))) if leaf_font_size: matplotlib.pylab.setp(lbls, 'size', leaf_font_size) else: matplotlib.pylab.setp(lbls, 'size', float(_get_tick_text_size(len(ivl)))) # txt.set_fontsize() # txt.set_rotation(45) # Make the tick marks invisible because they cover up the links for line in axis.get_xticklines(): line.set_visible(False) elif orientation == 'bottom': axis.set_ylim([dvw, 0]) axis.set_xlim([0, ivw]) xlines = icoords ylines = dcoords if no_labels: axis.set_xticks([]) axis.set_xticklabels([]) else: axis.set_xticks(ivticks) axis.set_xticklabels(ivl) lbls=axis.get_xticklabels() if leaf_rotation: matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation) else: matplotlib.pylab.setp(lbls, 'rotation', float(_get_tick_rotation(p))) if leaf_font_size: matplotlib.pylab.setp(lbls, 'size', leaf_font_size) else: matplotlib.pylab.setp(lbls, 'size', float(_get_tick_text_size(p))) axis.xaxis.set_ticks_position('top') # Make the tick marks invisible because they cover up the links for line in axis.get_xticklines(): line.set_visible(False) elif orientation == 'left': axis.set_xlim([0, dvw]) axis.set_ylim([0, ivw]) xlines = dcoords ylines = icoords if no_labels: axis.set_yticks([]) axis.set_yticklabels([]) else: axis.set_yticks(ivticks) axis.set_yticklabels(ivl) lbls=axis.get_yticklabels() if leaf_rotation: matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation) if leaf_font_size: matplotlib.pylab.setp(lbls, 'size', leaf_font_size) axis.yaxis.set_ticks_position('left') # Make the tick marks invisible because they cover up the # links for line in axis.get_yticklines(): line.set_visible(False) elif orientation == 'right': axis.set_xlim([dvw, 0]) axis.set_ylim([0, ivw]) xlines = dcoords ylines = icoords if no_labels: axis.set_yticks([]) axis.set_yticklabels([]) else: axis.set_yticks(ivticks) axis.set_yticklabels(ivl) lbls=axis.get_yticklabels() if leaf_rotation: matplotlib.pylab.setp(lbls, 'rotation', leaf_rotation) if leaf_font_size: matplotlib.pylab.setp(lbls, 'size', leaf_font_size) axis.yaxis.set_ticks_position('right') # Make the tick marks invisible because they cover up the links for line in axis.get_yticklines(): line.set_visible(False) # Let's use collections instead. This way there is a separate legend item for each # tree grouping, rather than stupidly one for each line segment. 
colors_used = _remove_dups(color_list) color_to_lines = {} for color in colors_used: color_to_lines[color] = [] for (xline,yline,color) in zip(xlines, ylines, color_list): color_to_lines[color].append(zip(xline, yline)) colors_to_collections = {} # Construct the collections. for color in colors_used: coll = matplotlib.collections.LineCollection(color_to_lines[color], colors=(color,)) colors_to_collections[color] = coll # Add all the non-blue link groupings, i.e. those groupings below the color threshold. for color in colors_used: if color != 'b': axis.add_collection(colors_to_collections[color]) # If there is a blue grouping (i.e., links above the color threshold), # it should go last. if 'b' in colors_to_collections: axis.add_collection(colors_to_collections['b']) if contraction_marks is not None: #xs=[x for (x, y) in contraction_marks] #ys=[y for (x, y) in contraction_marks] if orientation in ('left', 'right'): for (x,y) in contraction_marks: e=matplotlib.patches.Ellipse((y, x), width=dvw/100, height=1.0) axis.add_artist(e) e.set_clip_box(axis.bbox) e.set_alpha(0.5) e.set_facecolor('k') if orientation in ('top', 'bottom'): for (x,y) in contraction_marks: e=matplotlib.patches.Ellipse((x, y), width=1.0, height=dvw/100) axis.add_artist(e) e.set_clip_box(axis.bbox) e.set_alpha(0.5) e.set_facecolor('k') #matplotlib.pylab.plot(xs, ys, 'go', markeredgecolor='k', markersize=3) #matplotlib.pylab.plot(ys, xs, 'go', markeredgecolor='k', markersize=3) matplotlib.pylab.draw_if_interactive() except ImportError: _mpl = False def _plot_dendrogram(*args, **kwargs): raise ImportError('matplotlib not available. Plot request denied.') _link_line_colors=['g', 'r', 'c', 'm', 'y', 'k'] def set_link_color_palette(palette): """ Changes the list of matplotlib color codes to use when coloring links with the dendrogram color_threshold feature. :Arguments: - palette : A list of matplotlib color codes. The order of the color codes is the order in which the colors are cycled through when color thresholding in the dendrogram. """ if type(palette) not in (types.ListType, types.TupleType): raise TypeError("palette must be a list or tuple") _ptypes = [type(p) == types.StringType for p in palette] if False in _ptypes: raise TypeError("all palette list elements must be color strings") for i in list(_link_line_colors): _link_line_colors.remove(i) _link_line_colors.extend(list(palette)) def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None, get_leaves=True, orientation='top', labels=None, count_sort=False, distance_sort=False, show_leaf_counts=True, no_plot=False, no_labels=False, color_list=None, leaf_font_size=None, leaf_rotation=None, leaf_label_func=None, no_leaves=False, show_contracted=False, link_color_func=None): r""" Plots the hiearchical clustering defined by the linkage Z as a dendrogram. The dendrogram illustrates how each cluster is composed by drawing a U-shaped link between a non-singleton cluster and its children. The height of the top of the U-link is the distance between its children clusters. It is also the cophenetic distance between original observations in the two children clusters. It is expected that the distances in Z[:,2] be monotonic, otherwise crossings appear in the dendrogram. :Arguments: - Z : ndarray The linkage matrix encoding the hierarchical clustering to render as a dendrogram. See the ``linkage`` function for more information on the format of ``Z``. 
- truncate_mode : string The dendrogram can be hard to read when the original observation matrix from which the linkage is derived is large. Truncation is used to condense the dendrogram. There are several modes: * None/'none': no truncation is performed (Default) * 'lastp': the last ``p`` non-singleton formed in the linkage are the only non-leaf nodes in the linkage; they correspond to to rows ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are contracted into leaf nodes. * 'mlab': This corresponds to MATLAB(TM) behavior. (not implemented yet) * 'level'/'mtica': no more than ``p`` levels of the dendrogram tree are displayed. This corresponds to Mathematica(TM) behavior. - p : int The ``p`` parameter for ``truncate_mode``. ` - color_threshold : double For brevity, let :math:`t` be the ``color_threshold``. Colors all the descendent links below a cluster node :math:`k` the same color if :math:`k` is the first node below the cut threshold :math:`t`. All links connecting nodes with distances greater than or equal to the threshold are colored blue. If :math:`t` is less than or equal to zero, all nodes are colored blue. If ``color_threshold`` is ``None`` or 'default', corresponding with MATLAB(TM) behavior, the threshold is set to ``0.7*max(Z[:,2])``. - get_leaves : bool Includes a list ``R['leaves']=H`` in the result dictionary. For each :math:`i`, ``H[i] == j``, cluster node :math:`j` appears in the :math:`i` th position in the left-to-right traversal of the leaves, where :math:`j < 2n-1` and :math:`i < n`. - orientation : string The direction to plot the dendrogram, which can be any of the following strings * 'top': plots the root at the top, and plot descendent links going downwards. (default). * 'bottom': plots the root at the bottom, and plot descendent links going upwards. * 'left': plots the root at the left, and plot descendent links going right. * 'right': plots the root at the right, and plot descendent links going left. - labels : ndarray By default ``labels`` is ``None`` so the index of the original observation is used to label the leaf nodes. Otherwise, this is an :math:`n` -sized list (or tuple). The ``labels[i]`` value is the text to put under the :math:`i` th leaf node only if it corresponds to an original observation and not a non-singleton cluster. - count_sort : string/bool For each node n, the order (visually, from left-to-right) n's two descendent links are plotted is determined by this parameter, which can be any of the following values: * False: nothing is done. * 'ascending'/True: the child with the minimum number of original objects in its cluster is plotted first. * 'descendent': the child with the maximum number of original objects in its cluster is plotted first. Note ``distance_sort`` and ``count_sort`` cannot both be ``True``. - distance_sort : string/bool For each node n, the order (visually, from left-to-right) n's two descendent links are plotted is determined by this parameter, which can be any of the following values: * False: nothing is done. * 'ascending'/True: the child with the minimum distance between its direct descendents is plotted first. * 'descending': the child with the maximum distance between its direct descendents is plotted first. Note ``distance_sort`` and ``count_sort`` cannot both be ``True``. - show_leaf_counts : bool When ``True``, leaf nodes representing :math:`k>1` original observation are labeled with the number of observations they contain in parentheses. - no_plot : bool When ``True``, the final rendering is not performed. 
This is useful if only the data structures computed for the rendering are needed or if matplotlib is not available. - no_labels : bool When ``True``, no labels appear next to the leaf nodes in the rendering of the dendrogram. - leaf_label_rotation : double Specifies the angle (in degrees) to rotate the leaf labels. When unspecified, the rotation based on the number of nodes in the dendrogram. (Default=0) - leaf_font_size : int Specifies the font size (in points) of the leaf labels. When unspecified, the size based on the number of nodes in the dendrogram. - leaf_label_func : lambda or function When leaf_label_func is a callable function, for each leaf with cluster index :math:`k < 2n-1`. The function is expected to return a string with the label for the leaf. Indices :math:`k < n` correspond to original observations while indices :math:`k \geq n` correspond to non-singleton clusters. For example, to label singletons with their node id and non-singletons with their id, count, and inconsistency coefficient, simply do:: # First define the leaf label function. def llf(id): if id < n: return str(id) else: return '[%d %d %1.2f]' % (id, count, R[n-id,3]) # The text for the leaf nodes is going to be big so force # a rotation of 90 degrees. dendrogram(Z, leaf_label_func=llf, leaf_rotation=90) - show_contracted : bool When ``True`` the heights of non-singleton nodes contracted into a leaf node are plotted as crosses along the link connecting that leaf node. This really is only useful when truncation is used (see ``truncate_mode`` parameter). - link_color_func : lambda/function When a callable function, link_color_function is called with each non-singleton id corresponding to each U-shaped link it will paint. The function is expected to return the color to paint the link, encoded as a matplotlib color string code. For example:: dendrogram(Z, link_color_func=lambda k: colors[k]) colors the direct links below each untruncated non-singleton node ``k`` using ``colors[k]``. :Returns: - R : dict A dictionary of data structures computed to render the dendrogram. Its has the following keys: - 'icoords': a list of lists ``[I1, I2, ..., Ip]`` where ``Ik`` is a list of 4 independent variable coordinates corresponding to the line that represents the k'th link painted. - 'dcoords': a list of lists ``[I2, I2, ..., Ip]`` where ``Ik`` is a list of 4 independent variable coordinates corresponding to the line that represents the k'th link painted. - 'ivl': a list of labels corresponding to the leaf nodes. - 'leaves': for each i, ``H[i] == j``, cluster node :math:`j` appears in the :math:`i` th position in the left-to-right traversal of the leaves, where :math:`j < 2n-1` and :math:`i < n`. If :math:`j` is less than :math:`n`, the :math:`i` th leaf node corresponds to an original observation. Otherwise, it corresponds to a non-singleton cluster. """ # Features under consideration. # # ... = dendrogram(..., leaves_order=None) # # Plots the leaves in the order specified by a vector of # original observation indices. If the vector contains duplicates # or results in a crossing, an exception will be thrown. Passing # None orders leaf nodes based on the order they appear in the # pre-order traversal. 
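    # A typical call, as a sketch (it assumes the _hierarchy_wrap extension
    # and a usable matplotlib backend):
    #
    #   Z = linkage(y, method='single')
    #   R = dendrogram(Z, truncate_mode='lastp', p=12, show_contracted=True)
    #
    # The returned dictionary (icoord/dcoord/ivl/leaves/color_list) is
    # computed even when no_plot=True, so the function can be used purely
    # to lay out a dendrogram without rendering it.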
Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') Zs = Z.shape n = Zs[0] + 1 if type(p) in (types.IntType, types.FloatType): p = int(p) else: raise TypeError('The second argument must be a number') if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None): raise ValueError('Invalid truncation mode.') if truncate_mode == 'lastp' or truncate_mode == 'mlab': if p > n or p == 0: p = n if truncate_mode == 'mtica' or truncate_mode == 'level': if p <= 0: p = np.inf if get_leaves: lvs = [] else: lvs = None icoord_list=[] dcoord_list=[] color_list=[] current_color=[0] currently_below_threshold=[False] if no_leaves: ivl=None else: ivl=[] if color_threshold is None or \ (type(color_threshold) == types.StringType and color_threshold=='default'): color_threshold = max(Z[:,2])*0.7 R={'icoord':icoord_list, 'dcoord':dcoord_list, 'ivl':ivl, 'leaves':lvs, 'color_list':color_list} props = {'cbt': False, 'cc':0} if show_contracted: contraction_marks = [] else: contraction_marks = None _dendrogram_calculate_info(Z=Z, p=p, truncate_mode=truncate_mode, \ color_threshold=color_threshold, \ get_leaves=get_leaves, \ orientation=orientation, \ labels=labels, \ count_sort=count_sort, \ distance_sort=distance_sort, \ show_leaf_counts=show_leaf_counts, \ i=2*n-2, iv=0.0, ivl=ivl, n=n, \ icoord_list=icoord_list, \ dcoord_list=dcoord_list, lvs=lvs, \ current_color=current_color, \ color_list=color_list, \ currently_below_threshold=currently_below_threshold, \ leaf_label_func=leaf_label_func, \ contraction_marks=contraction_marks, \ link_color_func=link_color_func) if not no_plot: mh = max(Z[:,2]) _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, no_labels, color_list, leaf_font_size=leaf_font_size, leaf_rotation=leaf_rotation, contraction_marks=contraction_marks) return R def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels): # If the leaf id structure is not None and is a list then the caller # to dendrogram has indicated that cluster id's corresponding to the # leaf nodes should be recorded. if lvs is not None: lvs.append(int(i)) # If leaf node labels are to be displayed... if ivl is not None: # If a leaf_label_func has been provided, the label comes from the # string returned from the leaf_label_func, which is a function # passed to dendrogram. if leaf_label_func: ivl.append(leaf_label_func(int(i))) else: # Otherwise, if the dendrogram caller has passed a labels list # for the leaf nodes, use it. if labels is not None: ivl.append(labels[int(i-n)]) else: # Otherwise, use the id as the label for the leaf.x ivl.append(str(int(i))) def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels, show_leaf_counts): # If the leaf id structure is not None and is a list then the caller # to dendrogram has indicated that cluster id's corresponding to the # leaf nodes should be recorded. 
if lvs is not None: lvs.append(int(i)) if ivl is not None: if leaf_label_func: ivl.append(leaf_label_func(int(i))) else: if show_leaf_counts: ivl.append("(" + str(int(Z[i-n, 3])) + ")") else: ivl.append("") def _append_contraction_marks(Z, iv, i, n, contraction_marks): _append_contraction_marks_sub(Z, iv, Z[i-n, 0], n, contraction_marks) _append_contraction_marks_sub(Z, iv, Z[i-n, 1], n, contraction_marks) def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks): if (i >= n): contraction_marks.append((iv, Z[i-n, 2])) _append_contraction_marks_sub(Z, iv, Z[i-n, 0], n, contraction_marks) _append_contraction_marks_sub(Z, iv, Z[i-n, 1], n, contraction_marks) def _dendrogram_calculate_info(Z, p, truncate_mode, \ color_threshold=np.inf, get_leaves=True, \ orientation='top', labels=None, \ count_sort=False, distance_sort=False, \ show_leaf_counts=False, i=-1, iv=0.0, \ ivl=[], n=0, icoord_list=[], dcoord_list=[], \ lvs=None, mhr=False, \ current_color=[], color_list=[], \ currently_below_threshold=[], \ leaf_label_func=None, level=0, contraction_marks=None, link_color_func=None): """ Calculates the endpoints of the links as well as the labels for the the dendrogram rooted at the node with index i. iv is the independent variable value to plot the left-most leaf node below the root node i (if orientation='top', this would be the left-most x value where the plotting of this root node i and its descendents should begin). ivl is a list to store the labels of the leaf nodes. The leaf_label_func is called whenever ivl != None, labels == None, and leaf_label_func != None. When ivl != None and labels != None, the labels list is used only for labeling the the leaf nodes. When ivl == None, no labels are generated for leaf nodes. When get_leaves==True, a list of leaves is built as they are visited in the dendrogram. Returns a tuple with l being the independent variable coordinate that corresponds to the midpoint of cluster to the left of cluster i if i is non-singleton, otherwise the independent coordinate of the leaf node if i is a leaf node. Returns a tuple (left, w, h, md) * left is the independent variable coordinate of the center of the the U of the subtree * w is the amount of space used for the subtree (in independent variable units) * h is the height of the subtree in dependent variable units * md is the max(Z[*,2]) for all nodes * below and including the target node. """ if n == 0: raise ValueError("Invalid singleton cluster count n.") if i == -1: raise ValueError("Invalid root cluster index i.") if truncate_mode == 'lastp': # If the node is a leaf node but corresponds to a non-single cluster, # it's label is either the empty string or the number of original # observations belonging to cluster i. 
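        # Under 'lastp' truncation only the last p merges survive as real
        # internal nodes, so any non-singleton cluster whose id is below
        # 2n - p is collapsed into a single (contracted) leaf here.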
if i < 2*n-p and i >= n: d = Z[i-n, 2] _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels, show_leaf_counts) if contraction_marks is not None: _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks) return (iv + 5.0, 10.0, 0.0, d) elif i < n: _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels) return (iv + 5.0, 10.0, 0.0, 0.0) elif truncate_mode in ('mtica', 'level'): if i > n and level > p: d = Z[i-n, 2] _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels, show_leaf_counts) if contraction_marks is not None: _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks) return (iv + 5.0, 10.0, 0.0, d) elif i < n: _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels) return (iv + 5.0, 10.0, 0.0, 0.0) elif truncate_mode in ('mlab',): pass # Otherwise, only truncate if we have a leaf node. # # If the truncate_mode is mlab, the linkage has been modified # with the truncated tree. # # Only place leaves if they correspond to original observations. if i < n: _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels) return (iv + 5.0, 10.0, 0.0, 0.0) # !!! Otherwise, we don't have a leaf node, so work on plotting a # non-leaf node. # Actual indices of a and b aa = Z[i-n, 0] ab = Z[i-n, 1] if aa > n: # The number of singletons below cluster a na = Z[aa-n, 3] # The distance between a's two direct children. da = Z[aa-n, 2] else: na = 1 da = 0.0 if ab > n: nb = Z[ab-n, 3] db = Z[ab-n, 2] else: nb = 1 db = 0.0 if count_sort == 'ascending' or count_sort == True: # If a has a count greater than b, it and its descendents should # be drawn to the right. Otherwise, to the left. if na > nb: # The cluster index to draw to the left (ua) will be ab # and the one to draw to the right (ub) will be aa ua = ab ub = aa else: ua = aa ub = ab elif count_sort == 'descending': # If a has a count less than or equal to b, it and its # descendents should be drawn to the left. Otherwise, to # the right. if na > nb: ua = aa ub = ab else: ua = ab ub = aa elif distance_sort == 'ascending' or distance_sort == True: # If a has a distance greater than b, it and its descendents should # be drawn to the right. Otherwise, to the left. if da > db: ua = ab ub = aa else: ua = aa ub = ab elif distance_sort == 'descending': # If a has a distance less than or equal to b, it and its # descendents should be drawn to the left. Otherwise, to # the right. if da > db: ua = aa ub = ab else: ua = ab ub = aa else: ua = aa ub = ab # The distance of the cluster to draw to the left (ua) is uad # and its count is uan. Likewise, the cluster to draw to the # right has distance ubd and count ubn. if ua < n: uad = 0.0 uan = 1 else: uad = Z[ua-n, 2] uan = Z[ua-n, 3] if ub < n: ubd = 0.0 ubn = 1 else: ubd = Z[ub-n, 2] ubn = Z[ub-n, 3] # Updated iv variable and the amount of space used. 
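    # Recurse into the subtree drawn on the left first; the returned tuple
    # is (midpoint, width, height, max distance) as described in the
    # docstring above, and the width uwa offsets the right subtree's iv.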
(uiva, uwa, uah, uamd) = \ _dendrogram_calculate_info(Z=Z, p=p, \ truncate_mode=truncate_mode, \ color_threshold=color_threshold, \ get_leaves=get_leaves, \ orientation=orientation, \ labels=labels, \ count_sort=count_sort, \ distance_sort=distance_sort, \ show_leaf_counts=show_leaf_counts, \ i=ua, iv=iv, ivl=ivl, n=n, \ icoord_list=icoord_list, \ dcoord_list=dcoord_list, lvs=lvs, \ current_color=current_color, \ color_list=color_list, \ currently_below_threshold=currently_below_threshold, \ leaf_label_func=leaf_label_func, \ level=level+1, contraction_marks=contraction_marks, \ link_color_func=link_color_func) h = Z[i-n, 2] if h >= color_threshold or color_threshold <= 0: c = 'b' if currently_below_threshold[0]: current_color[0] = (current_color[0] + 1) % len(_link_line_colors) currently_below_threshold[0] = False else: currently_below_threshold[0] = True c = _link_line_colors[current_color[0]] (uivb, uwb, ubh, ubmd) = \ _dendrogram_calculate_info(Z=Z, p=p, \ truncate_mode=truncate_mode, \ color_threshold=color_threshold, \ get_leaves=get_leaves, \ orientation=orientation, \ labels=labels, \ count_sort=count_sort, \ distance_sort=distance_sort, \ show_leaf_counts=show_leaf_counts, \ i=ub, iv=iv+uwa, ivl=ivl, n=n, \ icoord_list=icoord_list, \ dcoord_list=dcoord_list, lvs=lvs, \ current_color=current_color, \ color_list=color_list, \ currently_below_threshold=currently_below_threshold, leaf_label_func=leaf_label_func, \ level=level+1, contraction_marks=contraction_marks, \ link_color_func=link_color_func) # The height of clusters a and b ah = uad bh = ubd max_dist = max(uamd, ubmd, h) icoord_list.append([uiva, uiva, uivb, uivb]) dcoord_list.append([uah, h, h, ubh]) if link_color_func is not None: v = link_color_func(int(i)) if type(v) != types.StringType: raise TypeError("link_color_func must return a matplotlib color string!") color_list.append(v) else: color_list.append(c) return ( ((uiva + uivb) / 2), uwa+uwb, h, max_dist) def is_isomorphic(T1, T2): r""" Determines if two different cluster assignments ``T1`` and ``T2`` are equivalent. :Arguments: - T1 : ndarray An assignment of singleton cluster ids to flat cluster ids. - T2 : ndarray An assignment of singleton cluster ids to flat cluster ids. :Returns: - b : boolean Whether the flat cluster assignments ``T1`` and ``T2`` are equivalent. """ T1 = np.asarray(T1, order='c') T2 = np.asarray(T2, order='c') if type(T1) != np.ndarray: raise TypeError('T1 must be a numpy array.') if type(T2) != np.ndarray: raise TypeError('T2 must be a numpy array.') T1S = T1.shape T2S = T2.shape if len(T1S) != 1: raise ValueError('T1 must be one-dimensional.') if len(T2S) != 1: raise ValueError('T2 must be one-dimensional.') if T1S[0] != T2S[0]: raise ValueError('T1 and T2 must have the same number of elements.') n = T1S[0] d = {} for i in xrange(0,n): if T1[i] in d.keys(): if d[T1[i]] != T2[i]: return False else: d[T1[i]] = T2[i] return True def maxdists(Z): r""" MD = maxdists(Z) Returns the maximum distance between any cluster for each non-singleton cluster. :Arguments: - Z : ndarray The hierarchical clustering encoded as a matrix. See ``linkage`` for more information. :Returns: - MD : ndarray A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents the maximum distance between any cluster (including singletons) below and including the node with index i. More specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the set of all node indices below and including node i. 
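    For example (assuming the ``_hierarchy_wrap`` extension has been
    ported), for the monotonic linkage::

      Z = [[0, 1, 0.3, 2],
           [2, 3, 0.7, 3]]

    ``maxdists(Z)`` is ``[0.3, 0.7]``; for any monotonic linkage ``MD[i]``
    reduces to ``Z[i, 2]`` itself, and the two can differ only for
    non-monotonic linkages such as those the 'centroid' and 'median'
    methods can produce.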
""" Z = np.asarray(Z, order='c', dtype=np.double) is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 MD = np.zeros((n-1,)) [Z] = _copy_arrays_if_base_present([Z]) _hierarchy_wrap.get_max_dist_for_each_cluster_wrap(Z, MD, int(n)) return MD def maxinconsts(Z, R): r""" Returns the maximum inconsistency coefficient for each non-singleton cluster and its descendents. :Arguments: - Z : ndarray The hierarchical clustering encoded as a matrix. See ``linkage`` for more information. - R : ndarray The inconsistency matrix. :Returns: - MI : ndarray A monotonic ``(n-1)``-sized numpy array of doubles. """ Z = np.asarray(Z, order='c') R = np.asarray(R, order='c') is_valid_linkage(Z, throw=True, name='Z') is_valid_im(R, throw=True, name='R') n = Z.shape[0] + 1 if Z.shape[0] != R.shape[0]: raise ValueError("The inconsistency matrix and linkage matrix each have a different number of rows.") MI = np.zeros((n-1,)) [Z, R] = _copy_arrays_if_base_present([Z, R]) _hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MI, int(n), 3) return MI def maxRstat(Z, R, i): r""" Returns the maximum statistic for each non-singleton cluster and its descendents. :Arguments: - Z : ndarray The hierarchical clustering encoded as a matrix. See ``linkage`` for more information. - R : ndarray The inconsistency matrix. - i : int The column of ``R`` to use as the statistic. :Returns: - MR : ndarray Calculates the maximum statistic for the i'th column of the inconsistency matrix ``R`` for each non-singleton cluster node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where ``Q(j)`` the set of all node ids corresponding to nodes below and including ``j``. """ Z = np.asarray(Z, order='c') R = np.asarray(R, order='c') is_valid_linkage(Z, throw=True, name='Z') is_valid_im(R, throw=True, name='R') if type(i) is not types.IntType: raise TypeError('The third argument must be an integer.') if i < 0 or i > 3: raise ValueError('i must be an integer between 0 and 3 inclusive.') if Z.shape[0] != R.shape[0]: raise ValueError("The inconsistency matrix and linkage matrix each have a different number of rows.") n = Z.shape[0] + 1 MR = np.zeros((n-1,)) [Z, R] = _copy_arrays_if_base_present([Z, R]) _hierarchy_wrap.get_max_Rfield_for_each_cluster_wrap(Z, R, MR, int(n), i) return MR def leaders(Z, T): r""" (L, M) = leaders(Z, T): Returns the root nodes in a hierarchical clustering corresponding to a cut defined by a flat cluster assignment vector ``T``. See the ``fcluster`` function for more information on the format of ``T``. For each flat cluster :math:`j` of the :math:`k` flat clusters represented in the n-sized flat cluster assignment vector ``T``, this function finds the lowest cluster node :math:`i` in the linkage tree Z such that: * leaf descendents belong only to flat cluster j (i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where :math:`S(i)` is the set of leaf ids of leaf nodes descendent with cluster node :math:`i`) * there does not exist a leaf that is not descendent with :math:`i` that also belongs to cluster :math:`j` (i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If this condition is violated, ``T`` is not a valid cluster assignment vector, and an exception will be thrown. :Arguments: - Z : ndarray The hierarchical clustering encoded as a matrix. See ``linkage`` for more information. - T : ndarray The flat cluster assignment vector. :Returns: (L, M) - L : ndarray The leader linkage node id's stored as a k-element 1D array where :math:`k` is the number of flat clusters found in ``T``. 
``L[j]=i`` is the linkage cluster node id that is the leader of flat cluster with id M[j]. If ``i < n``, ``i`` corresponds to an original observation, otherwise it corresponds to a non-singleton cluster. For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with id 8's leader is linkage node 2. - M : ndarray The leader linkage node id's stored as a k-element 1D array where :math:`k` is the number of flat clusters found in ``T``. This allows the set of flat cluster ids to be any arbitrary set of :math:`k` integers. """ Z = np.asarray(Z, order='c') T = np.asarray(T, order='c') if type(T) != np.ndarray or T.dtype != 'i': raise TypeError('T must be a one-dimensional numpy array of integers.') is_valid_linkage(Z, throw=True, name='Z') if len(T) != Z.shape[0] + 1: raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.') Cl = np.unique(T) kk = len(Cl) L = np.zeros((kk,), dtype='i') M = np.zeros((kk,), dtype='i') n = Z.shape[0] + 1 [Z, T] = _copy_arrays_if_base_present([Z, T]) s = _hierarchy_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n)) if s >= 0: raise ValueError('T is not a valid assignment vector. Error found when examining linkage node %d (< 2n-1).' % s) return (L, M) # These are test functions to help me test the leaders function. def _leaders_test(Z, T): tr = to_tree(Z) _leaders_test_recurs_mark(tr, T) return tr def _leader_identify(tr, T): if tr.is_leaf(): return T[tr.id] else: left = tr.get_left() right = tr.get_right() lfid = _leader_identify(left, T) rfid = _leader_identify(right, T) print 'ndid: %d lid: %d lfid: %d rid: %d rfid: %d' % (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid) if lfid != rfid: if lfid != -1: print 'leader: %d with tag %d' % (left.id, lfid) if rfid != -1: print 'leader: %d with tag %d' % (right.id, rfid) return -1 else: return lfid def _leaders_test_recurs_mark(tr, T): if tr.is_leaf(): tr.asgn = T[tr.id] else: tr.asgn = -1 _leaders_test_recurs_mark(tr.left, T) _leaders_test_recurs_mark(tr.right, T)
gpl-3.0
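A minimal usage sketch of the cluster-inspection helpers defined in the hierarchy module above (leaders, maxdists, alongside the module's linkage and fcluster functions), using scipy's public scipy.cluster.hierarchy API. The toy data and the choice of four flat clusters are arbitrary illustrations, not part of the original file.

import numpy as np
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, fcluster, leaders, maxdists

# Twelve random observations in three dimensions.
X = np.random.RandomState(0).randn(12, 3)

# Condensed pairwise distances -> (n - 1) x 4 linkage matrix.
Z = linkage(pdist(X), method='average')

# Cut the tree into (at most) four flat clusters.
T = fcluster(Z, t=4, criterion='maxclust')

# Root linkage node of each flat cluster, and the matching flat cluster ids.
L, M = leaders(Z, T)

# Maximum merge distance below each non-singleton node: shape (n - 1,).
MD = maxdists(Z)

print(L, M)
print(MD.shape)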
hdmetor/scikit-learn
examples/manifold/plot_manifold_sphere.py
258
5101
#!/usr/bin/python # -*- coding: utf-8 -*- """ ============================================= Manifold Learning methods on a severed sphere ============================================= An application of the different :ref:`manifold` techniques on a spherical data-set. Here one can see the use of dimensionality reduction in order to gain some intuition regarding the manifold learning methods. Regarding the dataset, the poles are cut from the sphere, as well as a thin slice down its side. This enables the manifold learning techniques to 'spread it open' whilst projecting it onto two dimensions. For a similar example, where the methods are applied to the S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py` Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is to find a low-dimensional representation of the data (here 2D) in which the distances respect well the distances in the original high-dimensional space, unlike other manifold-learning algorithms, it does not seeks an isotropic representation of the data in the low-dimensional space. Here the manifold problem matches fairly that of representing a flat map of the Earth, as with `map projection <http://en.wikipedia.org/wiki/Map_projection>`_ """ # Author: Jaques Grobler <[email protected]> # License: BSD 3 clause print(__doc__) from time import time import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.ticker import NullFormatter from sklearn import manifold from sklearn.utils import check_random_state # Next line to silence pyflakes. Axes3D # Variables for manifold learning. n_neighbors = 10 n_samples = 1000 # Create our sphere. random_state = check_random_state(0) p = random_state.rand(n_samples) * (2 * np.pi - 0.55) t = random_state.rand(n_samples) * np.pi # Sever the poles from the sphere. indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8)))) colors = p[indices] x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \ np.sin(t[indices]) * np.sin(p[indices]), \ np.cos(t[indices]) # Plot our dataset. fig = plt.figure(figsize=(15, 8)) plt.suptitle("Manifold Learning with %i points, %i neighbors" % (1000, n_neighbors), fontsize=14) ax = fig.add_subplot(251, projection='3d') ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow) try: # compatibility matplotlib < 1.0 ax.view_init(40, -10) except: pass sphere_data = np.array([x, y, z]).T # Perform Locally Linear Embedding Manifold learning methods = ['standard', 'ltsa', 'hessian', 'modified'] labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE'] for i, method in enumerate(methods): t0 = time() trans_data = manifold\ .LocallyLinearEmbedding(n_neighbors, 2, method=method).fit_transform(sphere_data).T t1 = time() print("%s: %.2g sec" % (methods[i], t1 - t0)) ax = fig.add_subplot(252 + i) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("%s (%.2g sec)" % (labels[i], t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform Isomap Manifold learning. t0 = time() trans_data = manifold.Isomap(n_neighbors, n_components=2)\ .fit_transform(sphere_data).T t1 = time() print("%s: %.2g sec" % ('ISO', t1 - t0)) ax = fig.add_subplot(257) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform Multi-dimensional scaling. 
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))

ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
                                n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))

ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))

# The t-SNE panel is the tenth cell of the 2 x 5 grid.
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')

plt.show()
bsd-3-clause
tjtorres/SentiMap
Stream_Mod.py
1
6709
from tweepy.streaming import StreamListener from tweepy import OAuthHandler from tweepy import Stream from microsofttranslator import Translator import re import time import json import codecs import csv import sys from pymongo import MongoClient, Connection from datetime import datetime import gensim as gs import numpy as np from sklearn.naive_bayes import GaussianNB from sklearn.externals import joblib from nltk.corpus import stopwords from setproctitle import * setproctitle('Stream_Mod') ################################################################################### ####################Variables that contain the user credentials for the Twitter API access_token = "ACCESS_TOKEN" access_token_secret = "ACCESS_TOKEN_SECRET" consumer_key = "CONSUMER_KEY" consumer_secret = "CONSUMER_SECRET" #################################################################################### #################################################################################### ###############Variables that contain the user credentials for the Azure Translator client_ID = "CLIENT_ID" client_secret="CLIENT_SECRET" #################################################################################### #RE's for stripping unicode emoji and multiple letter instances. emoji = re.compile(u'[^\x00-\x7F\x80-\xFF\u0100-\u017F\u0180-\u024F\u1E00-\u1EFF]',re.UNICODE) multiple = re.compile(r"(.)\1{1,}", re.DOTALL) #Microsoft Translator Instance translator = Translator(client_ID, client_secret ) #################################################################################### ############################### Database Setup ##################################### #################################################################################### client = Connection() db = client.streamer if 'tweets' in db.collection_names(): print "Collection 'tweets' already exists" coll = db.tweets else: print "Creating Collection: 'tweets' ... " db.create_collection('tweets',capped=True, size=200000, max= 3000 ) coll = db.tweets ######################################################################## #Takes three arguments #INPUT: (phrase=string-to-vectorize, stop=set-of-stopwords, model=loaded-word2vec-model) #OUTPUT: Average of word vectors in string. 
def phrase2vec(phrase,stop, model): phrase = phrase.lower().split() phrase_fil = [w for w in phrase if not w in stop] size = 0 vec = np.zeros(300) for word in phrase_fil: try: vec= np.add(vec,model[word]) size+=1 except: pass if size==0: size=1 return np.divide(vec,size) #Takes 4 arguments #INPUT: (text=String to sentimentize, stop= stopword set, model= w2v-model, trained_classifier = sklearn classifier model) def get_sentiment(text, stop , model, trained_classifier): cl = trained_classifier vec = phrase2vec(text,stop,model) pred = cl.predict_proba(vec)[0][1] return pred #Twitter API Listener Class, inherits from tweepy.streaming.StreamListener class Listener(StreamListener): def __init__(self, classifier, stops, model): self.cl = classifier self.stop = stops self.model = model def on_data(self, data): #parse json from data event elem = json.loads(data) #### If geotagged filter text #### if 'coordinates' in elem: if elem['coordinates']!=None: #filter off emoji, urls, @Names, &amp; (etc.), symbols, "RT", and '#' stripped = emoji.sub('',elem['text']) stripped = re.sub(r'http[s]?[^\s]+','', stripped) stripped = re.sub(r'(@[A-Za-z0-9\_]+)' , "" ,stripped) stripped = re.sub(r'[\&].*;','',stripped) stripped = re.sub(r'[#|\!|\-|\+|:|//]', " ", stripped) stripped = re.sub( 'RT.','', stripped) stripped = re.sub('[\s]+' ,' ', stripped).strip() #print stripped #### Translate in not in English #### if elem['lang'] != 'en' and elem['lang'] != 'en-gb': try: translated_text = translator.translate(stripped , 'en', elem['lang']) except: translated_text = "" else: translated_text = stripped #Tweet must be longer than 2 words after filtering. if len(translated_text.split()) > 2: sentiment = get_sentiment(translated_text,self.stop ,self.model, self.cl) #after translation filter down multiple letters to 2, no non-latin alphanumric characters output = multiple.sub(r"\1\1", translated_text) output = re.sub('[^a-zA-Z0-9|\']', " ", output).strip() lat = elem['coordinates']['coordinates'][1] lon = elem['coordinates']['coordinates'][0] text = elem['text'].strip() trans = output #setup message containing cooordinates, unfiltered text, translated and filtered text, #time, and sentiment value (0-1). message = {'lon': lon, 'lat': lat , 'text': text, 'trans': trans ,\ 'time':datetime.utcnow(), 'sent': sentiment } #add message to mongo collection. coll.insert(message) return True def on_error(self, status): print status if __name__ == '__main__': print "Loading Classifier...\n\n" #load trained sklearn classifier cl=joblib.load('./static/Data/PKL/RFC/random_forest_avg.pkl') print "Classifier Loaded...loading model...\n\n" #load w2v vectors from GoogleNews training set. model= gs.models.Word2Vec.load_word2vec_format('./static/Data/GoogleNews-vectors-negative300.bin', binary=True) print "Model Loaded...\n\n" #load set of stop words stop_set = set(stopwords.words("english")) l = Listener(cl,stop_set,model) #Oauth Handling auth = OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) stream = Stream(auth, l) print "Listening..." stream.sample()
mit
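The phrase2vec function in the listing above averages word vectors over the non-stopword tokens of a tweet before the classifier sees them. Below is a self-contained toy sketch of that averaging step; the hand-made 4-dimensional "model" dictionary stands in for the GoogleNews Word2Vec vectors the script actually loads, so the vectors and words are purely illustrative.

import numpy as np

# Toy stand-in for the word2vec model: a dict of 4-d word vectors.
toy_model = {
    "good":  np.array([0.9, 0.1, 0.0, 0.2]),
    "bad":   np.array([-0.8, 0.0, 0.1, -0.3]),
    "movie": np.array([0.1, 0.5, 0.4, 0.0]),
}
stop = {"the", "a", "was"}

def phrase2vec(phrase, stop, model, dim=4):
    # Average the vectors of the in-vocabulary, non-stopword tokens.
    words = [w for w in phrase.lower().split() if w not in stop]
    vec = np.zeros(dim)
    n = 0
    for w in words:
        if w in model:          # mirrors the try/except lookup in the original
            vec += model[w]
            n += 1
    return vec / max(n, 1)

print(phrase2vec("The movie was good", stop, toy_model))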
icdishb/scikit-learn
benchmarks/bench_sgd_regression.py
283
5569
""" Benchmark for SGD regression Compares SGD regression against coordinate descent and Ridge on synthetic data. """ print(__doc__) # Author: Peter Prettenhofer <[email protected]> # License: BSD 3 clause import numpy as np import pylab as pl import gc from time import time from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet from sklearn.metrics import mean_squared_error from sklearn.datasets.samples_generator import make_regression if __name__ == "__main__": list_n_samples = np.linspace(100, 10000, 5).astype(np.int) list_n_features = [10, 100, 1000] n_test = 1000 noise = 0.1 alpha = 0.01 sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2)) elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2)) ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2)) asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2)) for i, n_train in enumerate(list_n_samples): for j, n_features in enumerate(list_n_features): X, y, coef = make_regression( n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True) X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] print("=======================") print("Round %d %d" % (i, j)) print("n_features:", n_features) print("n_samples:", n_train) # Shuffle data idx = np.arange(n_train) np.random.seed(13) np.random.shuffle(idx) X_train = X_train[idx] y_train = y_train[idx] std = X_train.std(axis=0) mean = X_train.mean(axis=0) X_train = (X_train - mean) / std X_test = (X_test - mean) / std std = y_train.std(axis=0) mean = y_train.mean(axis=0) y_train = (y_train - mean) / std y_test = (y_test - mean) / std gc.collect() print("- benchmarking ElasticNet") clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False) tstart = time() clf.fit(X_train, y_train) elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test) elnet_results[i, j, 1] = time() - tstart gc.collect() print("- benchmarking SGD") n_iter = np.ceil(10 ** 4.0 / n_train) clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False, n_iter=n_iter, learning_rate="invscaling", eta0=.01, power_t=0.25) tstart = time() clf.fit(X_train, y_train) sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test) sgd_results[i, j, 1] = time() - tstart gc.collect() print("n_iter", n_iter) print("- benchmarking A-SGD") n_iter = np.ceil(10 ** 4.0 / n_train) clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False, n_iter=n_iter, learning_rate="invscaling", eta0=.002, power_t=0.05, average=(n_iter * n_train // 2)) tstart = time() clf.fit(X_train, y_train) asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test) asgd_results[i, j, 1] = time() - tstart gc.collect() print("- benchmarking RidgeRegression") clf = Ridge(alpha=alpha, fit_intercept=False) tstart = time() clf.fit(X_train, y_train) ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test) ridge_results[i, j, 1] = time() - tstart # Plot results i = 0 m = len(list_n_features) pl.figure('scikit-learn SGD regression benchmark results', figsize=(5 * 2, 4 * m)) for j in range(m): pl.subplot(m, 2, i + 1) pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]), label="ElasticNet") pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]), label="SGDRegressor") pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]), label="A-SGDRegressor") pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]), label="Ridge") pl.legend(prop={"size": 10}) pl.xlabel("n_train") pl.ylabel("RMSE") pl.title("Test error - %d 
features" % list_n_features[j]) i += 1 pl.subplot(m, 2, i + 1) pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]), label="ElasticNet") pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]), label="SGDRegressor") pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]), label="A-SGDRegressor") pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]), label="Ridge") pl.legend(prop={"size": 10}) pl.xlabel("n_train") pl.ylabel("Time [sec]") pl.title("Training time - %d features" % list_n_features[j]) i += 1 pl.subplots_adjust(hspace=.30) pl.show()
bsd-3-clause
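A compact sketch of the comparison the benchmark above performs, reduced to one Ridge fit and one SGDRegressor fit on a single synthetic problem. It assumes a scikit-learn version in which SGDRegressor takes max_iter (the benchmark itself uses the older n_iter spelling); the sample and feature counts are arbitrary.

from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge, SGDRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

# One synthetic regression problem instead of the benchmark's full size/feature grid.
X, y = make_regression(n_samples=2000, n_features=50, noise=0.1, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

estimators = [
    ("Ridge", Ridge(alpha=0.01, fit_intercept=False)),
    ("SGD", SGDRegressor(alpha=0.01 / len(X_tr), fit_intercept=False,
                         max_iter=1000)),
]
for name, est in estimators:
    est.fit(X_tr, y_tr)
    print(name, mean_squared_error(y_te, est.predict(X_te)))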
RomainBrault/scikit-learn
sklearn/decomposition/dict_learning.py
7
47390
""" Dictionary learning """ from __future__ import print_function # Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort # License: BSD 3 clause import time import sys import itertools from math import sqrt, ceil import numpy as np from scipy import linalg from numpy.lib.stride_tricks import as_strided from ..base import BaseEstimator, TransformerMixin from ..externals.joblib import Parallel, delayed, cpu_count from ..externals.six.moves import zip from ..utils import (check_array, check_random_state, gen_even_slices, gen_batches, _get_n_jobs) from ..utils.extmath import randomized_svd, row_norms from ..utils.validation import check_is_fitted from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars', regularization=None, copy_cov=True, init=None, max_iter=1000, check_input=True, verbose=0): """Generic sparse coding Each column of the result is the solution to a Lasso problem. Parameters ---------- X : array of shape (n_samples, n_features) Data matrix. dictionary : array of shape (n_components, n_features) The dictionary matrix against which to solve the sparse coding of the data. Some of the algorithms assume normalized rows. gram : None | array, shape=(n_components, n_components) Precomputed Gram matrix, dictionary * dictionary' gram can be None if method is 'threshold'. cov : array, shape=(n_components, n_samples) Precomputed covariance, dictionary * X' algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'} lars: uses the least angle regression method (linear_model.lars_path) lasso_lars: uses Lars to compute the Lasso solution lasso_cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). lasso_lars will be faster if the estimated components are sparse. omp: uses orthogonal matching pursuit to estimate the sparse solution threshold: squashes to zero all coefficients less than regularization from the projection dictionary * data' regularization : int | float The regularization parameter. It corresponds to alpha when algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'. Otherwise it corresponds to n_nonzero_coefs. init : array of shape (n_samples, n_components) Initialization value of the sparse code. Only used if `algorithm='lasso_cd'`. max_iter : int, 1000 by default Maximum number of iterations to perform if `algorithm='lasso_cd'`. copy_cov : boolean, optional Whether to copy the precomputed covariance matrix; if False, it may be overwritten. check_input : boolean, optional If False, the input arrays X and dictionary will not be checked. verbose : int Controls the verbosity; the higher, the more messages. Defaults to 0. Returns ------- code : array of shape (n_components, n_features) The sparse codes See also -------- sklearn.linear_model.lars_path sklearn.linear_model.orthogonal_mp sklearn.linear_model.Lasso SparseCoder """ if X.ndim == 1: X = X[:, np.newaxis] n_samples, n_features = X.shape if cov is None and algorithm != 'lasso_cd': # overwriting cov is safe copy_cov = False cov = np.dot(dictionary, X.T) if algorithm == 'lasso_lars': alpha = float(regularization) / n_features # account for scaling try: err_mgt = np.seterr(all='ignore') # Not passing in verbose=max(0, verbose-1) because Lars.fit already # corrects the verbosity level. 
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False, verbose=verbose, normalize=False, precompute=gram, fit_path=False) lasso_lars.fit(dictionary.T, X.T, Xy=cov) new_code = lasso_lars.coef_ finally: np.seterr(**err_mgt) elif algorithm == 'lasso_cd': alpha = float(regularization) / n_features # account for scaling # TODO: Make verbosity argument for Lasso? # sklearn.linear_model.coordinate_descent.enet_path has a verbosity # argument that we could pass in from Lasso. clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False, precompute=gram, max_iter=max_iter, warm_start=True) if init is not None: clf.coef_ = init clf.fit(dictionary.T, X.T, check_input=check_input) new_code = clf.coef_ elif algorithm == 'lars': try: err_mgt = np.seterr(all='ignore') # Not passing in verbose=max(0, verbose-1) because Lars.fit already # corrects the verbosity level. lars = Lars(fit_intercept=False, verbose=verbose, normalize=False, precompute=gram, n_nonzero_coefs=int(regularization), fit_path=False) lars.fit(dictionary.T, X.T, Xy=cov) new_code = lars.coef_ finally: np.seterr(**err_mgt) elif algorithm == 'threshold': new_code = ((np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T) elif algorithm == 'omp': # TODO: Should verbose argument be passed to this? new_code = orthogonal_mp_gram( Gram=gram, Xy=cov, n_nonzero_coefs=int(regularization), tol=None, norms_squared=row_norms(X, squared=True), copy_Xy=copy_cov).T else: raise ValueError('Sparse coding method must be "lasso_lars" ' '"lasso_cd", "lasso", "threshold" or "omp", got %s.' % algorithm) return new_code # XXX : could be moved to the linear_model module def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars', n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None, max_iter=1000, n_jobs=1, check_input=True, verbose=0): """Sparse coding Each row of the result is the solution to a sparse coding problem. The goal is to find a sparse array `code` such that:: X ~= code * dictionary Read more in the :ref:`User Guide <SparseCoder>`. Parameters ---------- X : array of shape (n_samples, n_features) Data matrix dictionary : array of shape (n_components, n_features) The dictionary matrix against which to solve the sparse coding of the data. Some of the algorithms assume normalized rows for meaningful output. gram : array, shape=(n_components, n_components) Precomputed Gram matrix, dictionary * dictionary' cov : array, shape=(n_components, n_samples) Precomputed covariance, dictionary' * X algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'} lars: uses the least angle regression method (linear_model.lars_path) lasso_lars: uses Lars to compute the Lasso solution lasso_cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). lasso_lars will be faster if the estimated components are sparse. omp: uses orthogonal matching pursuit to estimate the sparse solution threshold: squashes to zero all coefficients less than alpha from the projection dictionary * X' n_nonzero_coefs : int, 0.1 * n_features by default Number of nonzero coefficients to target in each column of the solution. This is only used by `algorithm='lars'` and `algorithm='omp'` and is overridden by `alpha` in the `omp` case. alpha : float, 1. by default If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the penalty applied to the L1 norm. If `algorithm='threshold'`, `alpha` is the absolute value of the threshold below which coefficients will be squashed to zero. 
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of the reconstruction error targeted. In this case, it overrides `n_nonzero_coefs`. init : array of shape (n_samples, n_components) Initialization value of the sparse codes. Only used if `algorithm='lasso_cd'`. max_iter : int, 1000 by default Maximum number of iterations to perform if `algorithm='lasso_cd'`. copy_cov : boolean, optional Whether to copy the precomputed covariance matrix; if False, it may be overwritten. n_jobs : int, optional Number of parallel jobs to run. check_input : boolean, optional If False, the input arrays X and dictionary will not be checked. verbose : int, optional Controls the verbosity; the higher, the more messages. Defaults to 0. Returns ------- code : array of shape (n_samples, n_components) The sparse codes See also -------- sklearn.linear_model.lars_path sklearn.linear_model.orthogonal_mp sklearn.linear_model.Lasso SparseCoder """ if check_input: if algorithm == 'lasso_cd': dictionary = check_array(dictionary, order='C', dtype='float64') X = check_array(X, order='C', dtype='float64') else: dictionary = check_array(dictionary) X = check_array(X) n_samples, n_features = X.shape n_components = dictionary.shape[0] if gram is None and algorithm != 'threshold': gram = np.dot(dictionary, dictionary.T) if cov is None and algorithm != 'lasso_cd': copy_cov = False cov = np.dot(dictionary, X.T) if algorithm in ('lars', 'omp'): regularization = n_nonzero_coefs if regularization is None: regularization = min(max(n_features / 10, 1), n_components) else: regularization = alpha if regularization is None: regularization = 1. if n_jobs == 1 or algorithm == 'threshold': code = _sparse_encode(X, dictionary, gram, cov=cov, algorithm=algorithm, regularization=regularization, copy_cov=copy_cov, init=init, max_iter=max_iter, check_input=False, verbose=verbose) # This ensure that dimensionality of code is always 2, # consistant with the case n_jobs > 1 if code.ndim == 1: code = code[np.newaxis, :] return code # Enter parallel code block code = np.empty((n_samples, n_components)) slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs))) code_views = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_sparse_encode)( X[this_slice], dictionary, gram, cov[:, this_slice] if cov is not None else None, algorithm, regularization=regularization, copy_cov=copy_cov, init=init[this_slice] if init is not None else None, max_iter=max_iter, check_input=False) for this_slice in slices) for this_slice, this_view in zip(slices, code_views): code[this_slice] = this_view return code def _update_dict(dictionary, Y, code, verbose=False, return_r2=False, random_state=None): """Update the dense dictionary factor in place. Parameters ---------- dictionary : array of shape (n_features, n_components) Value of the dictionary at the previous iteration. Y : array of shape (n_features, n_samples) Data matrix. code : array of shape (n_components, n_samples) Sparse coding of the data against which to optimize the dictionary. verbose: Degree of output the procedure will print. return_r2 : bool Whether to compute and return the residual sum of squares corresponding to the computed solution. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
Returns ------- dictionary : array of shape (n_features, n_components) Updated dictionary. """ n_components = len(code) n_samples = Y.shape[0] random_state = check_random_state(random_state) # Residuals, computed 'in-place' for efficiency R = -np.dot(dictionary, code) R += Y R = np.asfortranarray(R) ger, = linalg.get_blas_funcs(('ger',), (dictionary, code)) for k in range(n_components): # R <- 1.0 * U_k * V_k^T + R R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True) dictionary[:, k] = np.dot(R, code[k, :].T) # Scale k'th atom atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k]) if atom_norm_square < 1e-20: if verbose == 1: sys.stdout.write("+") sys.stdout.flush() elif verbose: print("Adding new random atom") dictionary[:, k] = random_state.randn(n_samples) # Setting corresponding coefs to 0 code[k, :] = 0.0 dictionary[:, k] /= sqrt(np.dot(dictionary[:, k], dictionary[:, k])) else: dictionary[:, k] /= sqrt(atom_norm_square) # R <- -1.0 * U_k * V_k^T + R R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True) if return_r2: R **= 2 # R is fortran-ordered. For numpy version < 1.6, sum does not # follow the quick striding first, and is thus inefficient on # fortran ordered data. We take a flat view of the data with no # striding R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,)) R = np.sum(R) return dictionary, R return dictionary def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8, method='lars', n_jobs=1, dict_init=None, code_init=None, callback=None, verbose=False, random_state=None, return_n_iter=False): """Solves a dictionary learning matrix factorization problem. Finds the best dictionary and the corresponding sparse code for approximating the data matrix X by solving:: (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components where V is the dictionary and U is the sparse code. Read more in the :ref:`User Guide <DictionaryLearning>`. Parameters ---------- X : array of shape (n_samples, n_features) Data matrix. n_components : int, Number of dictionary atoms to extract. alpha : int, Sparsity controlling parameter. max_iter : int, Maximum number of iterations to perform. tol : float, Tolerance for the stopping condition. method : {'lars', 'cd'} lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). Lars will be faster if the estimated components are sparse. n_jobs : int, Number of parallel jobs to run, or -1 to autodetect. dict_init : array of shape (n_components, n_features), Initial value for the dictionary for warm restart scenarios. code_init : array of shape (n_samples, n_components), Initial value for the sparse code for warm restart scenarios. callback : Callable that gets invoked every five iterations. verbose : Degree of output the procedure will print. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. return_n_iter : bool Whether or not to return the number of iterations. Returns ------- code : array of shape (n_samples, n_components) The sparse code factor in the matrix factorization. 
dictionary : array of shape (n_components, n_features), The dictionary factor in the matrix factorization. errors : array Vector of errors at each iteration. n_iter : int Number of iterations run. Returned only if `return_n_iter` is set to True. See also -------- dict_learning_online DictionaryLearning MiniBatchDictionaryLearning SparsePCA MiniBatchSparsePCA """ if method not in ('lars', 'cd'): raise ValueError('Coding method %r not supported as a fit algorithm.' % method) method = 'lasso_' + method t0 = time.time() # Avoid integer division problems alpha = float(alpha) random_state = check_random_state(random_state) if n_jobs == -1: n_jobs = cpu_count() # Init the code and the dictionary with SVD of Y if code_init is not None and dict_init is not None: code = np.array(code_init, order='F') # Don't copy V, it will happen below dictionary = dict_init else: code, S, dictionary = linalg.svd(X, full_matrices=False) dictionary = S[:, np.newaxis] * dictionary r = len(dictionary) if n_components <= r: # True even if n_components=None code = code[:, :n_components] dictionary = dictionary[:n_components, :] else: code = np.c_[code, np.zeros((len(code), n_components - r))] dictionary = np.r_[dictionary, np.zeros((n_components - r, dictionary.shape[1]))] # Fortran-order dict, as we are going to access its row vectors dictionary = np.array(dictionary, order='F') residuals = 0 errors = [] current_cost = np.nan if verbose == 1: print('[dict_learning]', end=' ') # If max_iter is 0, number of iterations returned should be zero ii = -1 for ii in range(max_iter): dt = (time.time() - t0) if verbose == 1: sys.stdout.write(".") sys.stdout.flush() elif verbose: print("Iteration % 3i " "(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)" % (ii, dt, dt / 60, current_cost)) # Update code code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha, init=code, n_jobs=n_jobs) # Update dictionary dictionary, residuals = _update_dict(dictionary.T, X.T, code.T, verbose=verbose, return_r2=True, random_state=random_state) dictionary = dictionary.T # Cost function current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code)) errors.append(current_cost) if ii > 0: dE = errors[-2] - errors[-1] # assert(dE >= -tol * errors[-1]) if dE < tol * errors[-1]: if verbose == 1: # A line return print("") elif verbose: print("--- Convergence reached after %d iterations" % ii) break if ii % 5 == 0 and callback is not None: callback(locals()) if return_n_iter: return code, dictionary, errors, ii + 1 else: return code, dictionary, errors def dict_learning_online(X, n_components=2, alpha=1, n_iter=100, return_code=True, dict_init=None, callback=None, batch_size=3, verbose=False, shuffle=True, n_jobs=1, method='lars', iter_offset=0, random_state=None, return_inner_stats=False, inner_stats=None, return_n_iter=False): """Solves a dictionary learning matrix factorization problem online. Finds the best dictionary and the corresponding sparse code for approximating the data matrix X by solving:: (U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components where V is the dictionary and U is the sparse code. This is accomplished by repeatedly iterating over mini-batches by slicing the input data. Read more in the :ref:`User Guide <DictionaryLearning>`. Parameters ---------- X : array of shape (n_samples, n_features) Data matrix. n_components : int, Number of dictionary atoms to extract. alpha : float, Sparsity controlling parameter. n_iter : int, Number of iterations to perform. 
return_code : boolean, Whether to also return the code U or just the dictionary V. dict_init : array of shape (n_components, n_features), Initial value for the dictionary for warm restart scenarios. callback : Callable that gets invoked every five iterations. batch_size : int, The number of samples to take in each batch. verbose : Degree of output the procedure will print. shuffle : boolean, Whether to shuffle the data before splitting it in batches. n_jobs : int, Number of parallel jobs to run, or -1 to autodetect. method : {'lars', 'cd'} lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). Lars will be faster if the estimated components are sparse. iter_offset : int, default 0 Number of previous iterations completed on the dictionary used for initialization. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. return_inner_stats : boolean, optional Return the inner statistics A (dictionary covariance) and B (data approximation). Useful to restart the algorithm in an online setting. If return_inner_stats is True, return_code is ignored inner_stats : tuple of (A, B) ndarrays Inner sufficient statistics that are kept by the algorithm. Passing them at initialization is useful in online settings, to avoid loosing the history of the evolution. A (n_components, n_components) is the dictionary covariance matrix. B (n_features, n_components) is the data approximation matrix return_n_iter : bool Whether or not to return the number of iterations. Returns ------- code : array of shape (n_samples, n_components), the sparse code (only returned if `return_code=True`) dictionary : array of shape (n_components, n_features), the solutions to the dictionary learning problem n_iter : int Number of iterations run. Returned only if `return_n_iter` is set to `True`. 
See also -------- dict_learning DictionaryLearning MiniBatchDictionaryLearning SparsePCA MiniBatchSparsePCA """ if n_components is None: n_components = X.shape[1] if method not in ('lars', 'cd'): raise ValueError('Coding method not supported as a fit algorithm.') method = 'lasso_' + method t0 = time.time() n_samples, n_features = X.shape # Avoid integer division problems alpha = float(alpha) random_state = check_random_state(random_state) if n_jobs == -1: n_jobs = cpu_count() # Init V with SVD of X if dict_init is not None: dictionary = dict_init else: _, S, dictionary = randomized_svd(X, n_components, random_state=random_state) dictionary = S[:, np.newaxis] * dictionary r = len(dictionary) if n_components <= r: dictionary = dictionary[:n_components, :] else: dictionary = np.r_[dictionary, np.zeros((n_components - r, dictionary.shape[1]))] if verbose == 1: print('[dict_learning]', end=' ') if shuffle: X_train = X.copy() random_state.shuffle(X_train) else: X_train = X dictionary = check_array(dictionary.T, order='F', dtype=np.float64, copy=False) X_train = check_array(X_train, order='C', dtype=np.float64, copy=False) batches = gen_batches(n_samples, batch_size) batches = itertools.cycle(batches) # The covariance of the dictionary if inner_stats is None: A = np.zeros((n_components, n_components)) # The data approximation B = np.zeros((n_features, n_components)) else: A = inner_stats[0].copy() B = inner_stats[1].copy() # If n_iter is zero, we need to return zero. ii = iter_offset - 1 for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches): this_X = X_train[batch] dt = (time.time() - t0) if verbose == 1: sys.stdout.write(".") sys.stdout.flush() elif verbose: if verbose > 10 or ii % ceil(100. / verbose) == 0: print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)" % (ii, dt, dt / 60)) this_code = sparse_encode(this_X, dictionary.T, algorithm=method, alpha=alpha, n_jobs=n_jobs).T # Update the auxiliary variables if ii < batch_size - 1: theta = float((ii + 1) * batch_size) else: theta = float(batch_size ** 2 + ii + 1 - batch_size) beta = (theta + 1 - batch_size) / (theta + 1) A *= beta A += np.dot(this_code, this_code.T) B *= beta B += np.dot(this_X.T, this_code.T) # Update dictionary dictionary = _update_dict(dictionary, B, A, verbose=verbose, random_state=random_state) # XXX: Can the residuals be of any use? 
# Maybe we need a stopping criteria based on the amount of # modification in the dictionary if callback is not None: callback(locals()) if return_inner_stats: if return_n_iter: return dictionary.T, (A, B), ii - iter_offset + 1 else: return dictionary.T, (A, B) if return_code: if verbose > 1: print('Learning code...', end=' ') elif verbose == 1: print('|', end=' ') code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha, n_jobs=n_jobs, check_input=False) if verbose > 1: dt = (time.time() - t0) print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60)) if return_n_iter: return code, dictionary.T, ii - iter_offset + 1 else: return code, dictionary.T if return_n_iter: return dictionary.T, ii - iter_offset + 1 else: return dictionary.T class SparseCodingMixin(TransformerMixin): """Sparse coding mixin""" def _set_sparse_coding_params(self, n_components, transform_algorithm='omp', transform_n_nonzero_coefs=None, transform_alpha=None, split_sign=False, n_jobs=1): self.n_components = n_components self.transform_algorithm = transform_algorithm self.transform_n_nonzero_coefs = transform_n_nonzero_coefs self.transform_alpha = transform_alpha self.split_sign = split_sign self.n_jobs = n_jobs def transform(self, X, y=None): """Encode the data as a sparse combination of the dictionary atoms. Coding method is determined by the object parameter `transform_algorithm`. Parameters ---------- X : array of shape (n_samples, n_features) Test data to be transformed, must have the same number of features as the data used to train the model. Returns ------- X_new : array, shape (n_samples, n_components) Transformed data """ check_is_fitted(self, 'components_') # XXX : kwargs is not documented X = check_array(X) n_samples, n_features = X.shape code = sparse_encode( X, self.components_, algorithm=self.transform_algorithm, n_nonzero_coefs=self.transform_n_nonzero_coefs, alpha=self.transform_alpha, n_jobs=self.n_jobs) if self.split_sign: # feature vector is split into a positive and negative side n_samples, n_features = code.shape split_code = np.empty((n_samples, 2 * n_features)) split_code[:, :n_features] = np.maximum(code, 0) split_code[:, n_features:] = -np.minimum(code, 0) code = split_code return code class SparseCoder(BaseEstimator, SparseCodingMixin): """Sparse coding Finds a sparse representation of data against a fixed, precomputed dictionary. Each row of the result is the solution to a sparse coding problem. The goal is to find a sparse array `code` such that:: X ~= code * dictionary Read more in the :ref:`User Guide <SparseCoder>`. Parameters ---------- dictionary : array, [n_components, n_features] The dictionary atoms used for sparse coding. Lines are assumed to be normalized to unit norm. transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ 'threshold'} Algorithm used to transform the data: lars: uses the least angle regression method (linear_model.lars_path) lasso_lars: uses Lars to compute the Lasso solution lasso_cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). lasso_lars will be faster if the estimated components are sparse. omp: uses orthogonal matching pursuit to estimate the sparse solution threshold: squashes to zero all coefficients less than alpha from the projection ``dictionary * X'`` transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default Number of nonzero coefficients to target in each column of the solution. 
This is only used by `algorithm='lars'` and `algorithm='omp'` and is overridden by `alpha` in the `omp` case. transform_alpha : float, 1. by default If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the penalty applied to the L1 norm. If `algorithm='threshold'`, `alpha` is the absolute value of the threshold below which coefficients will be squashed to zero. If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of the reconstruction error targeted. In this case, it overrides `n_nonzero_coefs`. split_sign : bool, False by default Whether to split the sparse feature vector into the concatenation of its negative part and its positive part. This can improve the performance of downstream classifiers. n_jobs : int, number of parallel jobs to run Attributes ---------- components_ : array, [n_components, n_features] The unchanged dictionary atoms See also -------- DictionaryLearning MiniBatchDictionaryLearning SparsePCA MiniBatchSparsePCA sparse_encode """ def __init__(self, dictionary, transform_algorithm='omp', transform_n_nonzero_coefs=None, transform_alpha=None, split_sign=False, n_jobs=1): self._set_sparse_coding_params(dictionary.shape[0], transform_algorithm, transform_n_nonzero_coefs, transform_alpha, split_sign, n_jobs) self.components_ = dictionary def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ return self class DictionaryLearning(BaseEstimator, SparseCodingMixin): """Dictionary learning Finds a dictionary (a set of atoms) that can best be used to represent data using a sparse code. Solves the optimization problem:: (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components Read more in the :ref:`User Guide <DictionaryLearning>`. Parameters ---------- n_components : int, number of dictionary elements to extract alpha : float, sparsity controlling parameter max_iter : int, maximum number of iterations to perform tol : float, tolerance for numerical error fit_algorithm : {'lars', 'cd'} lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). Lars will be faster if the estimated components are sparse. .. versionadded:: 0.17 *cd* coordinate descent method to improve speed. transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ 'threshold'} Algorithm used to transform the data lars: uses the least angle regression method (linear_model.lars_path) lasso_lars: uses Lars to compute the Lasso solution lasso_cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). lasso_lars will be faster if the estimated components are sparse. omp: uses orthogonal matching pursuit to estimate the sparse solution threshold: squashes to zero all coefficients less than alpha from the projection ``dictionary * X'`` .. versionadded:: 0.17 *lasso_cd* coordinate descent method to improve speed. transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default Number of nonzero coefficients to target in each column of the solution. This is only used by `algorithm='lars'` and `algorithm='omp'` and is overridden by `alpha` in the `omp` case. transform_alpha : float, 1. by default If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the penalty applied to the L1 norm. 
If `algorithm='threshold'`, `alpha` is the absolute value of the threshold below which coefficients will be squashed to zero. If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of the reconstruction error targeted. In this case, it overrides `n_nonzero_coefs`. split_sign : bool, False by default Whether to split the sparse feature vector into the concatenation of its negative part and its positive part. This can improve the performance of downstream classifiers. n_jobs : int, number of parallel jobs to run code_init : array of shape (n_samples, n_components), initial value for the code, for warm restart dict_init : array of shape (n_components, n_features), initial values for the dictionary, for warm restart verbose : degree of verbosity of the printed output random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- components_ : array, [n_components, n_features] dictionary atoms extracted from the data error_ : array vector of errors at each iteration n_iter_ : int Number of iterations run. Notes ----- **References:** J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf) See also -------- SparseCoder MiniBatchDictionaryLearning SparsePCA MiniBatchSparsePCA """ def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8, fit_algorithm='lars', transform_algorithm='omp', transform_n_nonzero_coefs=None, transform_alpha=None, n_jobs=1, code_init=None, dict_init=None, verbose=False, split_sign=False, random_state=None): self._set_sparse_coding_params(n_components, transform_algorithm, transform_n_nonzero_coefs, transform_alpha, split_sign, n_jobs) self.alpha = alpha self.max_iter = max_iter self.tol = tol self.fit_algorithm = fit_algorithm self.code_init = code_init self.dict_init = dict_init self.verbose = verbose self.random_state = random_state def fit(self, X, y=None): """Fit the model from data in X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the object itself """ random_state = check_random_state(self.random_state) X = check_array(X) if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components V, U, E, self.n_iter_ = dict_learning( X, n_components, self.alpha, tol=self.tol, max_iter=self.max_iter, method=self.fit_algorithm, n_jobs=self.n_jobs, code_init=self.code_init, dict_init=self.dict_init, verbose=self.verbose, random_state=random_state, return_n_iter=True) self.components_ = U self.error_ = E return self class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin): """Mini-batch dictionary learning Finds a dictionary (a set of atoms) that can best be used to represent data using a sparse code. Solves the optimization problem:: (U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1 (U,V) with || V_k ||_2 = 1 for all 0 <= k < n_components Read more in the :ref:`User Guide <DictionaryLearning>`. 
Parameters ---------- n_components : int, number of dictionary elements to extract alpha : float, sparsity controlling parameter n_iter : int, total number of iterations to perform fit_algorithm : {'lars', 'cd'} lars: uses the least angle regression method to solve the lasso problem (linear_model.lars_path) cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). Lars will be faster if the estimated components are sparse. transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \ 'threshold'} Algorithm used to transform the data. lars: uses the least angle regression method (linear_model.lars_path) lasso_lars: uses Lars to compute the Lasso solution lasso_cd: uses the coordinate descent method to compute the Lasso solution (linear_model.Lasso). lasso_lars will be faster if the estimated components are sparse. omp: uses orthogonal matching pursuit to estimate the sparse solution threshold: squashes to zero all coefficients less than alpha from the projection dictionary * X' transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default Number of nonzero coefficients to target in each column of the solution. This is only used by `algorithm='lars'` and `algorithm='omp'` and is overridden by `alpha` in the `omp` case. transform_alpha : float, 1. by default If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the penalty applied to the L1 norm. If `algorithm='threshold'`, `alpha` is the absolute value of the threshold below which coefficients will be squashed to zero. If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of the reconstruction error targeted. In this case, it overrides `n_nonzero_coefs`. split_sign : bool, False by default Whether to split the sparse feature vector into the concatenation of its negative part and its positive part. This can improve the performance of downstream classifiers. n_jobs : int, number of parallel jobs to run dict_init : array of shape (n_components, n_features), initial value of the dictionary for warm restart scenarios verbose : degree of verbosity of the printed output batch_size : int, number of samples in each mini-batch shuffle : bool, whether to shuffle the samples before forming batches random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- components_ : array, [n_components, n_features] components extracted from the data inner_stats_ : tuple of (A, B) ndarrays Internal sufficient statistics that are kept by the algorithm. Keeping them is useful in online settings, to avoid loosing the history of the evolution, but they shouldn't have any use for the end user. A (n_components, n_components) is the dictionary covariance matrix. B (n_features, n_components) is the data approximation matrix n_iter_ : int Number of iterations run. Notes ----- **References:** J. Mairal, F. Bach, J. Ponce, G. 
Sapiro, 2009: Online dictionary learning for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf) See also -------- SparseCoder DictionaryLearning SparsePCA MiniBatchSparsePCA """ def __init__(self, n_components=None, alpha=1, n_iter=1000, fit_algorithm='lars', n_jobs=1, batch_size=3, shuffle=True, dict_init=None, transform_algorithm='omp', transform_n_nonzero_coefs=None, transform_alpha=None, verbose=False, split_sign=False, random_state=None): self._set_sparse_coding_params(n_components, transform_algorithm, transform_n_nonzero_coefs, transform_alpha, split_sign, n_jobs) self.alpha = alpha self.n_iter = n_iter self.fit_algorithm = fit_algorithm self.dict_init = dict_init self.verbose = verbose self.shuffle = shuffle self.batch_size = batch_size self.split_sign = split_sign self.random_state = random_state def fit(self, X, y=None): """Fit the model from data in X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ random_state = check_random_state(self.random_state) X = check_array(X) U, (A, B), self.n_iter_ = dict_learning_online( X, self.n_components, self.alpha, n_iter=self.n_iter, return_code=False, method=self.fit_algorithm, n_jobs=self.n_jobs, dict_init=self.dict_init, batch_size=self.batch_size, shuffle=self.shuffle, verbose=self.verbose, random_state=random_state, return_inner_stats=True, return_n_iter=True) self.components_ = U # Keep track of the state of the algorithm to be able to do # some online fitting (partial_fit) self.inner_stats_ = (A, B) self.iter_offset_ = self.n_iter return self def partial_fit(self, X, y=None, iter_offset=None): """Updates the model using the data in X as a mini-batch. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. iter_offset : integer, optional The number of iteration on data batches that has been performed before this call to partial_fit. This is optional: if no number is passed, the memory of the object is used. Returns ------- self : object Returns the instance itself. """ if not hasattr(self, 'random_state_'): self.random_state_ = check_random_state(self.random_state) X = check_array(X) if hasattr(self, 'components_'): dict_init = self.components_ else: dict_init = self.dict_init inner_stats = getattr(self, 'inner_stats_', None) if iter_offset is None: iter_offset = getattr(self, 'iter_offset_', 0) U, (A, B) = dict_learning_online( X, self.n_components, self.alpha, n_iter=self.n_iter, method=self.fit_algorithm, n_jobs=self.n_jobs, dict_init=dict_init, batch_size=len(X), shuffle=False, verbose=self.verbose, return_code=False, iter_offset=iter_offset, random_state=self.random_state_, return_inner_stats=True, inner_stats=inner_stats) self.components_ = U # Keep track of the state of the algorithm to be able to do # some online fitting (partial_fit) self.inner_stats_ = (A, B) self.iter_offset_ = iter_offset + self.n_iter return self
bsd-3-clause
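A minimal usage sketch for the MiniBatchDictionaryLearning estimator captured in the record above. The data shape and parameter values are illustrative placeholders, and the `n_iter` keyword is the one this scikit-learn snapshot accepts (later releases rename it), so treat this as a sketch rather than a canonical recipe.

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

# illustrative data: 200 samples with 64 features (placeholder values)
rng = np.random.RandomState(0)
X = rng.randn(200, 64)

# fit() runs n_iter mini-batch updates and stores the atoms in components_
dico = MiniBatchDictionaryLearning(n_components=30, alpha=1.0, n_iter=500,
                                   batch_size=3, random_state=0)
dico.fit(X)
code = dico.transform(X)                   # sparse codes against components_
print(dico.components_.shape, code.shape)  # (30, 64) (200, 30)

# partial_fit() continues the same optimisation on a new mini-batch,
# reusing the sufficient statistics kept in inner_stats_
dico.partial_fit(rng.randn(10, 64))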
pme1123/pyroots
pyroots/utilities.py
1
12161
""" Created on Fri May 6 11:24:24 2016 @author: pme Contents: _zoom band_viewer multi_image_plot random_blobs tiff_splitter img_rescaler """ from matplotlib import pyplot as plt from scipy import ndimage import numpy as np from skimage import io, color, img_as_ubyte import os from multiprocessing.dummy import Pool import pyroots as pr from skimage import io, img_as_ubyte from tqdm import tqdm from multiprocessing import Pool from time import sleep from shutil import copy2 # for file subsampler import random def _zoom(image, xmin, xmax, ymin, ymax, set_scale=False): """ Subset an array to the bounding box suggested by the titles If `set_scale`, then the first two pixels of the slice are set to the max and min of the entire image. """ out = image[ymin:ymax, xmin:xmax] if set_scale: if len(image.shape) == 3: out[0, 0, 0:3] = [np.max(image[:, :, i]) for i in range(image.shape[2])] out[1, 1, 0:3] = [np.min(image[:, :, i]) for i in range(image.shape[2])] else: out[0, 0] = np.max(image) out[1, 1] = np.min(image) return(out) def band_viewer(img, colorspace, zoom_coords = None, return_bands=False): """ Utility function to look at the separate bands of multiple colorspace versions. Parameters ---------- img : array RGB image, such as imported by ``skimage.io.imread`` colorspace : str Colorspace to which to convert. Must be finish one the ``skimage.color.rgb2*`` functions. zoom_coords : list List of four integers denoting the start and end of an area of interest to view more closely, or none return_bands : bool Do you want to return the bands of the colorspace in an object? Returns ------- Plots the bands of the given colorspace using ``pyroots.multi_image_plot``. If ``return_bands`` is ``True``, returns these bands in a list. See Also -------- ``skimage.color``, ``pyroots.multi_image_split`` """ if zoom_coords is None: zoom_coords = {'xmin' : 0, 'xmax' : img.shape[1], 'ymin' : 0, 'ymax' : img.shape[0]} elif isinstance(zoom_coords, list) is True: zoom_coords = {'xmin' : zoom_coords[0], 'xmax' : zoom_coords[1], 'ymin' : zoom_coords[2], 'ymax' : zoom_coords[3]} elif isinstance(zoom_coords, dict) is False: raise "Zoom Coordinates Issue" #image is rgb if colorspace is not "rgb": img = getattr(color, 'rgb2' + colorspace)(img) bands = pr.img_split(img) if return_bands is True: return (bands) else: pr.multi_image_plot([pr._zoom(i, **zoom_coords) for i in bands], [colorspace[0], colorspace[1], colorspace[2]]) def multi_image_plot(images, titles, color_map="gray", axis="off", titlesize=16, interpolation="None"): """ Wrapper function for ``pyplot.imshow`` to plot multiple images along a single row for easy comparisons. Requires ``matplotlib.pyplot`` Parameters ---------- images : list Image arrays to plot titles : list Titles (strings) for each image color_map : string Color map for 1d images. 3d images plot normally axis : bool Show pixel location axis? 
titlesize : int Size of titles interpolation : str Interpolation method for rendering Returns ------- An array of images generated using ``matplotlib.pyplot.show()`` """ from matplotlib import pyplot as plt n = len(images) plt.figure(figsize=(3*n, 4)) for k in range(1, n+1): plt.subplot(1, n, k) plt.imshow(images[k-1], cmap=color_map, interpolation = interpolation) plt.axis(axis) plt.title(titles[k-1], size=titlesize) plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0, left=0, right=1) return plt.show() def random_blobs(n=100, dims=256, seed=1, size=0.25, noise=True): """ Function to create a square image with blobs formed around randomly placed points. The image can include random noise to make everything blurry for testing methods. Requires ``numpy`` and ``scipy.ndimage``. Parameters ---------- n : int set the number of points dims : int length of each side of the square seed : int set random number seed size : float set relative size of blobs around each point noise : bool add noise to the image Returns ------- An dims*dims array of blobs as either boolean or float """ im = np.zeros((dims, dims)) np.random.seed(seed) points = (dims * np.random.random((2, n))).astype(np.int) #blob locations im[(points[0]), (points[1])] = 1 im = ndimage.gaussian_filter(im, sigma=float(size) * dims / (n)) #blob size mask = (im > im.mean()).astype(np.float) #make hard lines around blobs if noise is True: mask += 0.1*im img = mask + 0.2 * np.random.randn(*mask.shape) #matrix of the shape of mask img += abs(img.min()) img = img / img.max() else: img = mask return img def tiff_splitter(directory_in, extension=".tif", threads=1): """ Silly function to load a tiff file and resave it. This is critical if the tiff is a multi-page tiff (ex. images, thumbnails) and you want to pre-process it with GIMP, for example. The output is the first page of the tiff. Parameters ---------- directory_in : str Directory where the images are extension : str Image extension. Defaults to ```".tif"```, but could be anything. threads : int For multithreading. This can be a stupidly slow function. Returns ------- Creates a directory called "split_images" in ```directory_in``` that has copies of the same images, but without thumbnails. """ #core function to map across threads def _load_unload_image(file_in): if file_in.endswith(extension): path_in = subdir + os.sep + file_in # what's the image called and where is it? path_out = directory_out + sub_path + os.sep + file_in #Import and export image io.imsave(path_out, io.imread(path_in)) print("Split: " + ".." + sub_path + os.sep + file_in) else: print("Skip: " + ".." + sub_path + os.sep + file_in) # housekeeping directory_out = directory_in + os.sep + "split_images" if not os.path.exists(directory_out): os.mkdir(directory_out) # initiate threads threadpool = Pool(threads) for subdir, dirs, files in os.walk(directory_in): sub_path = subdir[len(directory_in): ] if not "split_images" in subdir: if not os.path.exists(directory_out + subdir[len(directory_in): ]): os.mkdir(directory_out + subdir[len(directory_in): ]) threadpool.map(_load_unload_image, files) # end threads threadpool.close() threadpool.join() return("Done") def img_rescaler(dir_in, extension_in, threads=1): """ Import an image, rescale it to normal UBYTE (0-255, 8 bit) range, and re-save it. 
""" dir_out = os.path.join(dir_in, "rescaled") total_files = 0 for path, folder, filename in os.walk(dir_in): if dir_out not in path: for f in filename: if f.endswith(extension_in): total_files += 1 print("\nYou have {} images to analyze".format(total_files)) for path, folder, filename in os.walk(dir_in): if dir_out not in path: # Don't run in the output directory. # Make directory for saving objects subpath = path[len(dir_in)+1:] if not os.path.exists(os.path.join(dir_out, subpath)): os.mkdir(os.path.join(dir_out, subpath)) # What we'll do: global _core_fn # bad form for Pool.map() compatibility def _core_fn(filename): if filename.endswith(extension_in): # count progress. path_in = os.path.join(path, filename) subpath_in = os.path.join(subpath, filename) # for printing purposes path_out = os.path.join(dir_out, subpath, filename) if os.path.exists(path_out): #skip print("\nALREADY ANALYZED: {}. Skipping...\n".format(subpath_in)) else: #(try to) do it try: img = io.imread(path_in) # load image img = img_as_ubyte(img / np.max(img)) io.imsave(path_out, img) except: print("Couldn't analyze {}".format(subpath_in)) return() # run it sleep(1) # to give everything time to load thread_pool = Pool(threads) # Work on _core_fn (and give progressbar) tqdm.tqdm(thread_pool.imap_unordered(_core_fn, filename, chunksize=1), total=total_files) # finish thread_pool.close() thread_pool.join() return() ##################################################### ##################################################### ####### ###### ####### File Subsampler ###### ####### ###### ##################################################### ##################################################### def file_subsampler(N, dir_in, extension_in, dir_out): """ Subsamples files of type `extension_in` in `dir_in`. Copies them to a folder `dir_in/dir_out` as type `extension_out` (defaults to same as `extension_in`). Parameters ---------- N : int Number of files to select dir_in : str Full path to directory containing files. extension_in : str Extension of input files. dir_out : str Full path to directory to write output. Returns ------- Saves image files """ # Count files to analyze for status bar, and make lists of directories subpaths = [] # directories to make in dir_out files_in = [] # input files files_out = [] # output files file_names = [] # names of output files, including subpaths for path, folder, filename in os.walk(dir_in): if dir_out not in path: for f in filename: if f.endswith(extension_in): files_in.append(os.path.join(path, f)) # input files files_in = random.sample(files_in, N) # identify folders to make in dir_out for i in files_in: subpath = os.path.dirname(i)[len(dir_in)+1 : ] test_sub = sum([i == subpath for i in subpaths]) if test_sub==0: subpaths.append(subpath) # for making paths later on # identify file_names for the output fname = os.path.basename(i) files_out.append(os.path.join(dir_out, subpath, fname)) # make output directories if not os.path.exists(dir_out): os.mkdir(dir_out) for i in subpaths: if not os.path.exists(os.path.join(dir_out, i)): os.mkdir(os.path.join(dir_out, i)) for i in tqdm(range(N)): copy2(files_in[i], files_out[i]) return('Done')
apache-2.0
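An illustrative sketch of the synthetic-image helpers defined in the pyroots utilities module above, assuming the package re-exports them at the top level, as the module's own pr.img_split and pr.multi_image_plot calls suggest; the sizes and seeds are arbitrary.

import pyroots as pr  # assumes random_blobs/multi_image_plot are exposed here

# the same random-blob image with and without added noise
noisy = pr.random_blobs(n=50, dims=128, seed=2, size=0.3, noise=True)
mask = pr.random_blobs(n=50, dims=128, seed=2, size=0.3, noise=False)

# show them side by side (requires a working matplotlib backend)
pr.multi_image_plot([noisy, mask], ["with noise", "mask only"])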
simon-anders/htseq
python3/HTSeq/scripts/qa.py
1
8342
#!/usr/bin/env python # HTSeq_QA.py # # (c) Simon Anders, European Molecular Biology Laboratory, 2010 # released under GNU General Public License import sys import os.path import optparse from itertools import * import numpy import HTSeq def main(): try: import matplotlib except ImportError: sys.stderr.write("This script needs the 'matplotlib' library, which ") sys.stderr.write("was not found. Please install it.") matplotlib.use('PDF') from matplotlib import pyplot # Matplotlib <1.5 uses normalize, so this block will be deprecated try: from matplotlib.pyplot import Normalize except ImportError: from matplotlib.pyplot import normalize as Normalize # **** Parse command line **** optParser = optparse.OptionParser( usage="%prog [options] read_file", description= "This script take a file with high-throughput sequencing reads " + "(supported formats: SAM, Solexa _export.txt, FASTQ, Solexa " + "_sequence.txt) and performs a simply quality assessment by " + "producing plots showing the distribution of called bases and " + "base-call quality scores by position within the reads. The " + "plots are output as a PDF file.", epilog = "Written by Simon Anders ([email protected]), European Molecular Biology " + " Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " + " Public License v3. Part of the 'HTSeq' framework, version %s." % HTSeq.__version__) optParser.add_option( "-t", "--type", type="choice", dest="type", choices=("sam", "bam", "solexa-export", "fastq", "solexa-fastq"), default="sam", help="type of read_file (one of: sam [default], bam, " + "solexa-export, fastq, solexa-fastq)") optParser.add_option( "-o", "--outfile", type="string", dest="outfile", help="output filename (default is <read_file>.pdf)") optParser.add_option( "-r", "--readlength", type="int", dest="readlen", help="the maximum read length (when not specified, the script guesses from the file") optParser.add_option( "-g", "--gamma", type="float", dest="gamma", default=0.3, help="the gamma factor for the contrast adjustment of the quality score plot") optParser.add_option( "-n", "--nosplit", action="store_true", dest="nosplit", help="do not split reads in unaligned and aligned ones") optParser.add_option( "-m", "--maxqual", type="int", dest="maxqual", default=41, help="the maximum quality score that appears in the data (default: 41)") if len(sys.argv) == 1: optParser.print_help() sys.exit(1) (opts, args) = optParser.parse_args() if len(args) != 1: sys.stderr.write( sys.argv[0] + ": Error: Please provide one argument (the read_file).\n") sys.stderr.write(" Call with '-h' to get usage information.\n") sys.exit(1) readfilename = args[0] if opts.type == "sam": readfile = HTSeq.SAM_Reader(readfilename) isAlnmntFile = True elif opts.type == "bam": readfile = HTSeq.BAM_Reader(readfilename) isAlnmntFile = True elif opts.type == "solexa-export": readfile = HTSeq.SolexaExportReader(readfilename) isAlnmntFile = True elif opts.type == "fastq": readfile = HTSeq.FastqReader(readfilename) isAlnmntFile = False elif opts.type == "solexa-fastq": readfile = HTSeq.FastqReader(readfilename, "solexa") isAlnmntFile = False else: sys.error("Oops.") twoColumns = isAlnmntFile and not opts.nosplit if opts.outfile is None: outfilename = os.path.basename(readfilename) + ".pdf" else: outfilename = opts.outfile # **** Get read length **** if opts.readlen is not None: readlen = opts.readlen else: readlen = 0 if isAlnmntFile: reads = (a.read for a in readfile) else: reads = readfile for r in islice(reads, 10000): if len(r) > readlen: readlen = 
len(r) max_qual = opts.maxqual gamma = opts.gamma # **** Initialize count arrays **** base_arr_U = numpy.zeros((readlen, 5), numpy.int) qual_arr_U = numpy.zeros((readlen, max_qual+1), numpy.int) if twoColumns: base_arr_A = numpy.zeros((readlen, 5), numpy.int) qual_arr_A = numpy.zeros((readlen, max_qual+1), numpy.int) # **** Main counting loop **** i = 0 try: for a in readfile: if isAlnmntFile: r = a.read else: r = a if twoColumns and (isAlnmntFile and a.aligned): r.add_bases_to_count_array(base_arr_A) r.add_qual_to_count_array(qual_arr_A) else: r.add_bases_to_count_array(base_arr_U) r.add_qual_to_count_array(qual_arr_U) i += 1 if (i % 200000) == 0: print(i, "reads processed") except: sys.stderr.write("Error occured in: %s\n" % readfile.get_line_number_string()) raise print(i, "reads processed") # **** Normalize result **** def norm_by_pos(arr): arr = numpy.array(arr, numpy.float) arr_n = (arr.T / arr.sum(1)).T arr_n[arr == 0] = 0 return arr_n def norm_by_start(arr): arr = numpy.array(arr, numpy.float) arr_n = (arr.T / arr.sum(1)[0]).T arr_n[arr == 0] = 0 return arr_n base_arr_U_n = norm_by_pos(base_arr_U) qual_arr_U_n = norm_by_start(qual_arr_U) nreads_U = base_arr_U[0, :].sum() if twoColumns: base_arr_A_n = norm_by_pos(base_arr_A) qual_arr_A_n = norm_by_start(qual_arr_A) nreads_A = base_arr_A[0, :].sum() # **** Make plot **** def plot_bases(arr): xg = numpy.arange(readlen) pyplot.plot(xg, arr[:, 0], marker='.', color='red') pyplot.plot(xg, arr[:, 1], marker='.', color='darkgreen') pyplot.plot(xg, arr[:, 2], marker='.', color='lightgreen') pyplot.plot(xg, arr[:, 3], marker='.', color='orange') pyplot.plot(xg, arr[:, 4], marker='.', color='grey') pyplot.axis((0, readlen-1, 0, 1)) pyplot.text(readlen*.70, .9, "A", color="red") pyplot.text(readlen*.75, .9, "C", color="darkgreen") pyplot.text(readlen*.80, .9, "G", color="lightgreen") pyplot.text(readlen*.85, .9, "T", color="orange") pyplot.text(readlen*.90, .9, "N", color="grey") pyplot.figure() pyplot.subplots_adjust(top=.85) pyplot.suptitle(os.path.basename(readfilename), fontweight='bold') if twoColumns: pyplot.subplot(221) plot_bases(base_arr_U_n) pyplot.ylabel("proportion of base") pyplot.title("non-aligned reads\n%.0f%% (%.4f million)" % (100. * nreads_U / (nreads_U+nreads_A), nreads_U / 1e6)) pyplot.subplot(222) plot_bases(base_arr_A_n) pyplot.title("aligned reads\n%.0f%% (%.4f million)" % (100. * nreads_A / (nreads_U+nreads_A), nreads_A / 1e6)) pyplot.subplot(223) pyplot.pcolor(qual_arr_U_n.T ** gamma, cmap=pyplot.cm.Greens, norm=Normalize(0, 1)) pyplot.axis((0, readlen-1, 0, max_qual+1)) pyplot.xlabel("position in read") pyplot.ylabel("base-call quality score") pyplot.subplot(224) pyplot.pcolor(qual_arr_A_n.T ** gamma, cmap=pyplot.cm.Greens, norm=Normalize(0, 1)) pyplot.axis((0, readlen-1, 0, max_qual+1)) pyplot.xlabel("position in read") else: pyplot.subplot(211) plot_bases(base_arr_U_n) pyplot.ylabel("proportion of base") pyplot.title("%.3f million reads" % (nreads_U / 1e6)) pyplot.subplot(212) pyplot.pcolor(qual_arr_U_n.T ** gamma, cmap=pyplot.cm.Greens, norm=Normalize(0, 1)) pyplot.axis((0, readlen-1, 0, max_qual+1)) pyplot.xlabel("position in read") pyplot.ylabel("base-call quality score") pyplot.savefig(outfilename) if __name__ == "__main__": main()
gpl-3.0
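The counting core of the qa.py script above, stripped of the CLI and plotting, can be sketched as follows. The FASTQ path and the fixed read length are placeholders; the script instead guesses the read length from the first reads, and reads longer than readlen would overflow the count arrays in this sketch.

import numpy
import HTSeq

readlen, max_qual = 50, 41                             # placeholder values
base_arr = numpy.zeros((readlen, 5), int)              # columns: A, C, G, T, N
qual_arr = numpy.zeros((readlen, max_qual + 1), int)

# accumulate per-position base and quality counts, as in the main loop above
for read in HTSeq.FastqReader("reads.fastq"):          # placeholder file name
    read.add_bases_to_count_array(base_arr)
    read.add_qual_to_count_array(qual_arr)

# per-position base proportions (the norm_by_pos step of the script);
# positions never reached by any read come out as NaN here
base_freq = base_arr / base_arr.sum(axis=1, keepdims=True)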
bluesquall/rlspy
examples/compare_to_batch.py
1
1924
#!/usr/bin/env python
"""
Batch LS vs. RLS comparison example
===================================

"""

import numpy as np

import rlspy


def generate_random_truth_data(order = 3, sigma = 1):
    return np.random.normal(0, sigma, [order, 1])


def generate_random_coupling_matrix(shape = [4, 3]):
    return np.random.normal(0, 1, shape)


def generate_noisy_measurements(A, x, sigma):
    return np.dot(A, x) + np.random.normal(0, sigma)


def example(order = 3, morder = 4, N = 20):
    x = generate_random_truth_data(order, 1)
    A = [generate_random_coupling_matrix([morder, order]) for i in xrange(N)]
    sm = 1e-2 * np.ones(morder).reshape(-1, 1)
    V = np.diag(sm.ravel()**2)
    b = [generate_noisy_measurements(Ai, x, sm) for Ai in A]

    x0 = np.ones(order).reshape(-1, 1)
    P0 = np.identity(order)
    rlsi = rlspy.data_matrix.Estimator(x0, P0)

    # preallocate some arrays to track the evolution of the estimate
    xest = np.empty([order, N + 1])
    Pest = np.empty([order, order, N + 1])
    xest[:,0] = x0.ravel()
    Pest[:,:,0] = P0

    # run the RLS identification
    for i, (Ai, bi) in enumerate(zip(A, b)):
        rlsi.update(Ai, bi, V)
        xest[:, i + 1] = rlsi.x.ravel()
        Pest[:, :, i + 1] = rlsi.P

    xerr = x - xest

    # organize the coupling matrices and data for batch LS
    Ab = np.vstack(A)
    bb = np.vstack(b)
    bxest, residue, rankA, singA = np.linalg.lstsq(Ab, bb)
    bxerr = x - bxest
    bPest = None  # TODO A inv(V) A.T, needs full data V

    return xest, Pest, xerr, bxest, bPest, bxerr


if __name__ == '__main__':
    import matplotlib.pyplot as plt
    order = 2
    morder = 2
    N = 200
    x, P, r, bx, bp, br = example(order, morder, N)
    plt.semilogy(np.abs(r.T))
    plt.semilogy(np.array([0,N]), np.outer(np.ones(order), np.abs(br.T)), lw=2)
    plt.grid(True)
    plt.ylabel('abs(estimation error)')
    plt.xlabel('iteration')
    plt.show()
mit
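A single recursive-least-squares update with the rlspy.data_matrix.Estimator API used in the example above, reduced to one measurement; the prior, coupling matrix, and noise covariance below are illustrative placeholders.

import numpy as np
import rlspy

x0 = np.zeros((2, 1))               # prior estimate of a 2-parameter state
P0 = np.eye(2)                      # prior covariance
est = rlspy.data_matrix.Estimator(x0, P0)

A = np.array([[1.0, 0.5],
              [0.0, 1.0]])          # coupling matrix for this measurement
b = np.array([[0.7],
              [-1.2]])              # observation, roughly A @ x + noise
V = 1e-2 * np.eye(2)                # measurement noise covariance

est.update(A, b, V)                 # one RLS step
print(est.x)                        # updated estimate
print(est.P)                        # updated covariance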
ndchorley/scipy
scipy/stats/morestats.py
6
87719
# Author: Travis Oliphant, 2002 # # Further updates and enhancements by many SciPy developers. # from __future__ import division, print_function, absolute_import import math import warnings from collections import namedtuple import numpy as np from numpy import (isscalar, r_, log, sum, around, unique, asarray, zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil, floor, array, poly1d, compress, pi, exp, ravel, angle, count_nonzero) from numpy.testing.decorators import setastest from scipy._lib.six import string_types from scipy import optimize from scipy import special from . import statlib from . import stats from .stats import find_repeats from .contingency import chi2_contingency from . import distributions from ._distn_infrastructure import rv_generic __all__ = ['mvsdist', 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test', 'fligner', 'mood', 'wilcoxon', 'median_test', 'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp' ] def bayes_mvs(data, alpha=0.90): """ Bayesian confidence intervals for the mean, var, and std. Parameters ---------- data : array_like Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. Requires 2 or more data points. alpha : float, optional Probability that the returned confidence interval contains the true parameter. Returns ------- mean_cntr, var_cntr, std_cntr : tuple The three results are for the mean, variance and standard deviation, respectively. Each result is a tuple of the form:: (center, (lower, upper)) with `center` the mean of the conditional pdf of the value given the data, and `(lower, upper)` a confidence interval, centered on the median, containing the estimate to a probability ``alpha``. Notes ----- Each tuple of mean, variance, and standard deviation estimates represent the (center, (lower, upper)) with center the mean of the conditional pdf of the value given the data and (lower, upper) is a confidence interval centered on the median, containing the estimate to a probability ``alpha``. Converts data to 1-D and assumes all data has the same mean and variance. Uses Jeffrey's prior for variance and std. Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", http://hdl.handle.net/1877/438, 2006. """ m, v, s = mvsdist(data) if alpha >= 1 or alpha <= 0: raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha) Mean = namedtuple('Mean', ('statistic', 'minmax')) Variance = namedtuple('Variance', ('statistic', 'minmax')) Std_dev = namedtuple('Std_dev', ('statistic', 'minmax')) m_res = Mean(m.mean(), m.interval(alpha)) v_res = Variance(v.mean(), v.interval(alpha)) s_res = Std_dev(s.mean(), s.interval(alpha)) return m_res, v_res, s_res def mvsdist(data): """ 'Frozen' distributions for mean, variance, and standard deviation of data. Parameters ---------- data : array_like Input array. Converted to 1-D using ravel. Requires 2 or more data-points. 
Returns ------- mdist : "frozen" distribution object Distribution object representing the mean of the data vdist : "frozen" distribution object Distribution object representing the variance of the data sdist : "frozen" distribution object Distribution object representing the standard deviation of the data Notes ----- The return values from bayes_mvs(data) is equivalent to ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)`` on the three distribution objects returned from this function will give the same results that are returned from `bayes_mvs`. Examples -------- >>> from scipy import stats >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = stats.mvsdist(data) We now have frozen distribution objects "mean", "var" and "std" that we can examine: >>> mean.mean() 9.0 >>> mean.interval(0.95) (6.6120585482655692, 11.387941451734431) >>> mean.std() 1.1952286093343936 """ x = ravel(data) n = len(x) if n < 2: raise ValueError("Need at least 2 data-points.") xbar = x.mean() C = x.var() if n > 1000: # gaussian approximations for large n mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n)) sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n))) vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C) else: nm1 = n - 1 fac = n * C / 2. val = nm1 / 2. mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1)) sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac)) vdist = distributions.invgamma(val, scale=fac) return mdist, vdist, sdist def kstat(data, n=2): """ Return the nth k-statistic (1<=n<=4 so far). The nth k-statistic is the unique symmetric unbiased estimator of the nth cumulant kappa_n. Parameters ---------- data : array_like Input array. n : int, {1, 2, 3, 4}, optional Default is equal to 2. Returns ------- kstat : float The nth k-statistic. See Also -------- kstatvar: Returns an unbiased estimator of the variance of the k-statistic. Notes ----- The cumulants are related to central moments but are specifically defined using a power series expansion of the logarithm of the characteristic function (which is the Fourier transform of the PDF). In particular let phi(t) be the characteristic function, then:: ln phi(t) = > kappa_n (it)^n / n! (sum from n=0 to inf) The first few cumulants (kappa_n) in terms of central moments (mu_n) are:: kappa_1 = mu_1 kappa_2 = mu_2 kappa_3 = mu_3 kappa_4 = mu_4 - 3*mu_2**2 kappa_5 = mu_5 - 10*mu_2 * mu_3 References ---------- http://mathworld.wolfram.com/k-Statistic.html http://mathworld.wolfram.com/Cumulant.html """ if n > 4 or n < 1: raise ValueError("k-statistics only supported for 1<=n<=4") n = int(n) S = zeros(n + 1, 'd') data = ravel(data) N = len(data) for k in range(1, n + 1): S[k] = sum(data**k, axis=0) if n == 1: return S[1] * 1.0/N elif n == 2: return (N*S[2] - S[1]**2.0) / (N*(N - 1.0)) elif n == 3: return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0)) elif n == 4: return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / (N*(N-1.0)*(N-2.0)*(N-3.0))) else: raise ValueError("Should not be here.") def kstatvar(data, n=2): """ Returns an unbiased estimator of the variance of the k-statistic. See `kstat` for more details of the k-statistic. Parameters ---------- data : array_like Input array. n : int, {1, 2}, optional Default is equal to 2. Returns ------- kstatvar : float The nth k-statistic variance. 
See Also -------- kstat """ data = ravel(data) N = len(data) if n == 1: return kstat(data, n=2) * 1.0/N elif n == 2: k2 = kstat(data, n=2) k4 = kstat(data, n=4) return (2*N*k2**2 + (N-1)*k4) / (N*(N+1)) else: raise ValueError("Only n=1 or n=2 supported.") def _calc_uniform_order_statistic_medians(x): """See Notes section of `probplot` for details.""" N = len(x) osm_uniform = np.zeros(N, dtype=np.float64) osm_uniform[-1] = 0.5**(1.0 / N) osm_uniform[0] = 1 - osm_uniform[-1] i = np.arange(2, N) osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365) return osm_uniform def _parse_dist_kw(dist, enforce_subclass=True): """Parse `dist` keyword. Parameters ---------- dist : str or stats.distributions instance. Several functions take `dist` as a keyword, hence this utility function. enforce_subclass : bool, optional If True (default), `dist` needs to be a `_distn_infrastructure.rv_generic` instance. It can sometimes be useful to set this keyword to False, if a function wants to accept objects that just look somewhat like such an instance (for example, they have a ``ppf`` method). """ if isinstance(dist, rv_generic): pass elif isinstance(dist, string_types): try: dist = getattr(distributions, dist) except AttributeError: raise ValueError("%s is not a valid distribution name" % dist) elif enforce_subclass: msg = ("`dist` should be a stats.distributions instance or a string " "with the name of such a distribution.") raise ValueError(msg) return dist def _add_axis_labels_title(plot, xlabel, ylabel, title): """Helper function to add axes labels and a title to stats plots""" try: if hasattr(plot, 'set_title'): # Matplotlib Axes instance or something that looks like it plot.set_title(title) plot.set_xlabel(xlabel) plot.set_ylabel(ylabel) else: # matplotlib.pyplot module plot.title(title) plot.xlabel(xlabel) plot.ylabel(ylabel) except: # Not an MPL object or something that looks (enough) like it. # Don't crash on adding labels or title pass def probplot(x, sparams=(), dist='norm', fit=True, plot=None): """ Calculate quantiles for a probability plot, and optionally show the plot. Generates a probability plot of sample data against the quantiles of a specified theoretical distribution (the normal distribution by default). `probplot` optionally calculates a best-fit line for the data and plots the results using Matplotlib or a given plot function. Parameters ---------- x : array_like Sample/response data from which `probplot` creates the plot. sparams : tuple, optional Distribution-specific shape parameters (shape parameters plus location and scale). dist : str or stats.distributions instance, optional Distribution or distribution function name. The default is 'norm' for a normal probability plot. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. fit : bool, optional Fit a least-squares regression (best-fit) line to the sample data if True (default). plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. Returns ------- (osm, osr) : tuple of ndarrays Tuple of theoretical quantiles (osm, or order statistic medians) and ordered responses (osr). `osr` is simply sorted input `x`. For details on how `osm` is calculated see the Notes section. 
(slope, intercept, r) : tuple of floats, optional Tuple containing the result of the least-squares fit, if that is performed by `probplot`. `r` is the square root of the coefficient of determination. If ``fit=False`` and ``plot=None``, this tuple is not returned. Notes ----- Even if `plot` is given, the figure is not shown or saved by `probplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. `probplot` generates a probability plot, which should not be confused with a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this type, see ``statsmodels.api.ProbPlot``. The formula used for the theoretical quantiles (horizontal axis of the probability plot) is Filliben's estimate:: quantiles = dist.ppf(val), for 0.5**(1/n), for i = n val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1 1 - 0.5**(1/n), for i = 1 where ``i`` indicates the i-th ordered value and ``n`` is the total number of values. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> nsample = 100 >>> np.random.seed(7654321) A t distribution with small degrees of freedom: >>> ax1 = plt.subplot(221) >>> x = stats.t.rvs(3, size=nsample) >>> res = stats.probplot(x, plot=plt) A t distribution with larger degrees of freedom: >>> ax2 = plt.subplot(222) >>> x = stats.t.rvs(25, size=nsample) >>> res = stats.probplot(x, plot=plt) A mixture of two normal distributions with broadcasting: >>> ax3 = plt.subplot(223) >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], ... size=(nsample/2.,2)).ravel() >>> res = stats.probplot(x, plot=plt) A standard normal distribution: >>> ax4 = plt.subplot(224) >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample) >>> res = stats.probplot(x, plot=plt) Produce a new figure with a loggamma distribution, using the ``dist`` and ``sparams`` keywords: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> x = stats.loggamma.rvs(c=2.5, size=500) >>> stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax) >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5") Show the results with Matplotlib: >>> plt.show() """ x = np.asarray(x) _perform_fit = fit or (plot is not None) if x.size == 0: if _perform_fit: return (x, x), (np.nan, np.nan, 0.0) else: return x, x osm_uniform = _calc_uniform_order_statistic_medians(x) dist = _parse_dist_kw(dist, enforce_subclass=False) if sparams is None: sparams = () if isscalar(sparams): sparams = (sparams,) if not isinstance(sparams, tuple): sparams = tuple(sparams) osm = dist.ppf(osm_uniform, *sparams) osr = sort(x) if _perform_fit: # perform a linear least squares fit. slope, intercept, r, prob, sterrest = stats.linregress(osm, osr) if plot is not None: plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-') _add_axis_labels_title(plot, xlabel='Quantiles', ylabel='Ordered Values', title='Probability Plot') # Add R^2 value to the plot as text xmin = amin(osm) xmax = amax(osm) ymin = amin(x) ymax = amax(x) posx = xmin + 0.70 * (xmax - xmin) posy = ymin + 0.01 * (ymax - ymin) plot.text(posx, posy, "$R^2=%1.4f$" % r**2) if fit: return (osm, osr), (slope, intercept, r) else: return osm, osr def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'): """ Calculate the shape parameter that maximizes the PPCC The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. 
ppcc_max returns the shape parameter that would maximize the probability plot correlation coefficient for the given data to a one-parameter family of distributions. Parameters ---------- x : array_like Input array. brack : tuple, optional Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c) then they are assumed to be a starting interval for a downhill bracket search (see `scipy.optimize.brent`). dist : str or stats.distributions instance, optional Distribution or distribution function name. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. Returns ------- shape_value : float The shape parameter at which the probability plot correlation coefficient reaches its max value. See also -------- ppcc_plot, probplot, boxcox Notes ----- The brack keyword serves as a starting point which is useful in corner cases. One can use a plot to obtain a rough visual estimate of the location for the maximum to start the search near it. References ---------- .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm Examples -------- First we generate some random data from a Tukey-Lambda distribution, with shape parameter -0.7: >>> from scipy import stats >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000, ... random_state=1234567) + 1e4 Now we explore this data with a PPCC plot as well as the related probability plot and Box-Cox normplot. A red line is drawn where we expect the PPCC value to be maximal (at the shape parameter -0.7 used above): >>> import matplotlib.pyplot as plt >>> fig = plt.figure(figsize=(8, 6)) >>> ax = fig.add_subplot(111) >>> stats.ppcc_plot(x, -5, 5, plot=ax) We calculate the value where the shape should reach its maximum and a red line is drawn there. The line should coincide with the highest point in the ppcc_plot. >>> max = stats.ppcc_max(x) >>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value') >>> plt.show() """ dist = _parse_dist_kw(dist) osm_uniform = _calc_uniform_order_statistic_medians(x) osr = sort(x) # this function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) # and returns 1-r so that a minimization function maximizes the # correlation def tempfunc(shape, mi, yvals, func): xvals = func(mi, shape) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf)) def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80): """ Calculate and optionally plot probability plot correlation coefficient. The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. It cannot be used for distributions without shape parameters (like the normal distribution) or with multiple shape parameters. By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed distributions via an approximately normal one, and is therefore particularly useful in practice. Parameters ---------- x : array_like Input array. a, b: scalar Lower and upper bounds of the shape parameter to use. dist : str or stats.distributions instance, optional Distribution or distribution function name. 
Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. plot : object, optional If given, plots PPCC against the shape parameter. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `a` to `b`). Returns ------- svals : ndarray The shape values for which `ppcc` was calculated. ppcc : ndarray The calculated probability plot correlation coefficient values. See also -------- ppcc_max, probplot, boxcox_normplot, tukeylambda References ---------- J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. Examples -------- First we generate some random data from a Tukey-Lambda distribution, with shape parameter -0.7: >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> np.random.seed(1234567) >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 Now we explore this data with a PPCC plot as well as the related probability plot and Box-Cox normplot. A red line is drawn where we expect the PPCC value to be maximal (at the shape parameter -0.7 used above): >>> fig = plt.figure(figsize=(12, 4)) >>> ax1 = fig.add_subplot(131) >>> ax2 = fig.add_subplot(132) >>> ax3 = fig.add_subplot(133) >>> stats.probplot(x, plot=ax1) >>> stats.boxcox_normplot(x, -5, 5, plot=ax2) >>> stats.ppcc_plot(x, -5, 5, plot=ax3) >>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value') >>> plt.show() """ if b <= a: raise ValueError("`b` has to be larger than `a`.") svals = np.linspace(a, b, num=N) ppcc = np.empty_like(svals) for k, sval in enumerate(svals): _, r2 = probplot(x, sval, dist=dist, fit=True) ppcc[k] = r2[-1] if plot is not None: plot.plot(svals, ppcc, 'x') _add_axis_labels_title(plot, xlabel='Shape Values', ylabel='Prob Plot Corr. Coef.', title='(%s) PPCC Plot' % dist) return svals, ppcc def boxcox_llf(lmb, data): r"""The boxcox log-likelihood function. Parameters ---------- lmb : scalar Parameter for Box-Cox transformation. See `boxcox` for details. data : array_like Data to calculate Box-Cox log-likelihood for. If `data` is multi-dimensional, the log-likelihood is calculated along the first axis. Returns ------- llf : float or ndarray Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`, an array otherwise. See Also -------- boxcox, probplot, boxcox_normplot, boxcox_normmax Notes ----- The Box-Cox log-likelihood function is defined here as .. math:: llf = (\lambda - 1) \sum_i(\log(x_i)) - N/2 \log(\sum_i (y_i - \bar{y})^2 / N), where ``y`` is the Box-Cox transformed input data ``x``. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes >>> np.random.seed(1245) Generate some random variates and calculate Box-Cox log-likelihood values for them for a range of ``lmbda`` values: >>> x = stats.loggamma.rvs(5, loc=10, size=1000) >>> lmbdas = np.linspace(-2, 10) >>> llf = np.zeros(lmbdas.shape, dtype=np.float) >>> for ii, lmbda in enumerate(lmbdas): ... llf[ii] = stats.boxcox_llf(lmbda, x) Also find the optimal lmbda value with `boxcox`: >>> x_most_normal, lmbda_optimal = stats.boxcox(x) Plot the log-likelihood as function of lmbda. 
Add the optimal lmbda as a horizontal line to check that that's really the optimum: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(lmbdas, llf, 'b.-') >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r') >>> ax.set_xlabel('lmbda parameter') >>> ax.set_ylabel('Box-Cox log-likelihood') Now add some probability plots to show that where the log-likelihood is maximized the data transformed with `boxcox` looks closest to normal: >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): ... xt = stats.boxcox(x, lmbda=lmbda) ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') ... ax_inset.set_xticklabels([]) ... ax_inset.set_yticklabels([]) ... ax_inset.set_title('$\lambda=%1.2f$' % lmbda) >>> plt.show() """ data = np.asarray(data) N = data.shape[0] if N == 0: return np.nan y = boxcox(data, lmb) y_mean = np.mean(y, axis=0) llf = (lmb - 1) * np.sum(np.log(data), axis=0) llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0)) return llf def _boxcox_conf_interval(x, lmax, alpha): # Need to find the lambda for which # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1) target = boxcox_llf(lmax, x) - fac def rootfunc(lmbda, data, target): return boxcox_llf(lmbda, data) - target # Find positive endpoint of interval in which answer is to be found newlm = lmax + 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm += 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target)) # Now find negative interval in the same way newlm = lmax - 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm -= 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target)) return lmminus, lmplus def boxcox(x, lmbda=None, alpha=None): r""" Return a positive dataset transformed by a Box-Cox power transformation. Parameters ---------- x : ndarray Input array. Should be 1-dimensional. lmbda : {None, scalar}, optional If `lmbda` is not None, do the transformation for that value. If `lmbda` is None, find the lambda that maximizes the log-likelihood function and return it as the second output argument. alpha : {None, float}, optional If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence interval for `lmbda` as the third output argument. Must be between 0.0 and 1.0. Returns ------- boxcox : ndarray Box-Cox power transformed array. maxlog : float, optional If the `lmbda` parameter is None, the second returned argument is the lambda that maximizes the log-likelihood function. (min_ci, max_ci) : tuple of float, optional If `lmbda` parameter is None and ``alpha`` is not None, this returned tuple of floats represents the minimum and maximum confidence limits given ``alpha``. See Also -------- probplot, boxcox_normplot, boxcox_normmax, boxcox_llf Notes ----- The Box-Cox transform is given by:: y = (x**lmbda - 1) / lmbda, for lmbda > 0 log(x), for lmbda = 0 `boxcox` requires the input data to be positive. Sometimes a Box-Cox transformation provides a shift parameter to achieve this; `boxcox` does not. Such a shift parameter is equivalent to adding a positive constant to `x` before calling `boxcox`. 
The confidence limits returned when ``alpha`` is provided give the interval where: .. math:: llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1), with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared function. References ---------- G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the Royal Statistical Society B, 26, 211-252 (1964). Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt We generate some random variates from a non-normal distribution and make a probability plot for it, to show it is non-normal in the tails: >>> fig = plt.figure() >>> ax1 = fig.add_subplot(211) >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> stats.probplot(x, dist=stats.norm, plot=ax1) >>> ax1.set_xlabel('') >>> ax1.set_title('Probplot against normal distribution') We now use `boxcox` to transform the data so it's closest to normal: >>> ax2 = fig.add_subplot(212) >>> xt, _ = stats.boxcox(x) >>> stats.probplot(xt, dist=stats.norm, plot=ax2) >>> ax2.set_title('Probplot after Box-Cox transformation') >>> plt.show() """ x = np.asarray(x) if x.size == 0: return x if any(x <= 0): raise ValueError("Data must be positive.") if lmbda is not None: # single transformation return special.boxcox(x, lmbda) # If lmbda=None, find the lmbda that maximizes the log-likelihood function. lmax = boxcox_normmax(x, method='mle') y = boxcox(x, lmax) if alpha is None: return y, lmax else: # Find confidence interval interval = _boxcox_conf_interval(x, lmax, alpha) return y, lmax, interval def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'): """Compute optimal Box-Cox transform parameter for input data. Parameters ---------- x : array_like Input array. brack : 2-tuple, optional The starting interval for a downhill bracket search with `optimize.brent`. Note that this is in most cases not critical; the final result is allowed to be outside this bracket. method : str, optional The method to determine the optimal transform parameter (`boxcox` ``lmbda`` parameter). Options are: 'pearsonr' (default) Maximizes the Pearson correlation coefficient between ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be normally-distributed. 'mle' Minimizes the log-likelihood `boxcox_llf`. This is the method used in `boxcox`. 'all' Use all optimization methods available, and return all results. Useful to compare different methods. Returns ------- maxlog : float or ndarray The optimal transform parameter found. An array instead of a scalar for ``method='all'``. See Also -------- boxcox, boxcox_llf, boxcox_normplot Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> np.random.seed(1234) # make this example reproducible Generate some data and determine optimal ``lmbda`` in various ways: >>> x = stats.loggamma.rvs(5, size=30) + 5 >>> y, lmax_mle = stats.boxcox(x) >>> lmax_pearsonr = stats.boxcox_normmax(x) >>> lmax_mle 7.177... >>> lmax_pearsonr 7.916... 
>>> stats.boxcox_normmax(x, method='all') array([ 7.91667384, 7.17718692]) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> stats.boxcox_normplot(x, -10, 10, plot=ax) >>> ax.axvline(lmax_mle, color='r') >>> ax.axvline(lmax_pearsonr, color='g', ls='--') >>> plt.show() """ def _pearsonr(x, brack): osm_uniform = _calc_uniform_order_statistic_medians(x) xvals = distributions.norm.ppf(osm_uniform) def _eval_pearsonr(lmbda, xvals, samps): # This function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) and # returns ``1 - r`` so that a minimization function maximizes the # correlation. y = boxcox(samps, lmbda) yvals = np.sort(y) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x)) def _mle(x, brack): def _eval_mle(lmb, data): # function to minimize return -boxcox_llf(lmb, data) return optimize.brent(_eval_mle, brack=brack, args=(x,)) def _all(x, brack): maxlog = np.zeros(2, dtype=np.float) maxlog[0] = _pearsonr(x, brack) maxlog[1] = _mle(x, brack) return maxlog methods = {'pearsonr': _pearsonr, 'mle': _mle, 'all': _all} if method not in methods.keys(): raise ValueError("Method %s not recognized." % method) optimfunc = methods[method] return optimfunc(x, brack) def boxcox_normplot(x, la, lb, plot=None, N=80): """Compute parameters for a Box-Cox normality plot, optionally show it. A Box-Cox normality plot shows graphically what the best transformation parameter is to use in `boxcox` to obtain a distribution that is close to normal. Parameters ---------- x : array_like Input array. la, lb : scalar The lower and upper bounds for the ``lmbda`` values to pass to `boxcox` for Box-Cox transformations. These are also the limits of the horizontal axis of the plot if that is generated. plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `la` to `lb`). Returns ------- lmbdas : ndarray The ``lmbda`` values for which a Box-Cox transform was done. ppcc : ndarray Probability Plot Correlelation Coefficient, as obtained from `probplot` when fitting the Box-Cox transformed input `x` against a normal distribution. See Also -------- probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max Notes ----- Even if `plot` is given, the figure is not shown or saved by `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. 
Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt Generate some non-normally distributed data, and create a Box-Cox plot: >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> stats.boxcox_normplot(x, -20, 20, plot=ax) Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in the same plot: >>> _, maxlog = stats.boxcox(x) >>> ax.axvline(maxlog, color='r') >>> plt.show() """ x = np.asarray(x) if x.size == 0: return x if lb <= la: raise ValueError("`lb` has to be larger than `la`.") lmbdas = np.linspace(la, lb, num=N) ppcc = lmbdas * 0.0 for i, val in enumerate(lmbdas): # Determine for each lmbda the correlation coefficient of transformed x z = boxcox(x, lmbda=val) _, r2 = probplot(z, dist='norm', fit=True) ppcc[i] = r2[-1] if plot is not None: plot.plot(lmbdas, ppcc, 'x') _add_axis_labels_title(plot, xlabel='$\lambda$', ylabel='Prob Plot Corr. Coef.', title='Box-Cox Normality Plot') return lmbdas, ppcc def shapiro(x, a=None, reta=False): """ Perform the Shapiro-Wilk test for normality. The Shapiro-Wilk test tests the null hypothesis that the data was drawn from a normal distribution. Parameters ---------- x : array_like Array of sample data. a : array_like, optional Array of internal parameters used in the calculation. If these are not given, they will be computed internally. If x has length n, then a must have length n/2. reta : bool, optional Whether or not to return the internally computed a values. The default is False. Returns ------- W : float The test statistic. p-value : float The p-value for the hypothesis test. a : array_like, optional If `reta` is True, then these are the internally computed "a" values that may be passed into this function on future calls. See Also -------- anderson : The Anderson-Darling test for normality References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm """ N = len(x) if N < 3: raise ValueError("Data must be at least length 3.") if a is None: a = zeros(N, 'f') init = 0 else: if len(a) != N // 2: raise ValueError("len(a) must equal len(x)/2") init = 1 y = sort(x) a, w, pw, ifault = statlib.swilk(y, a[:N//2], init) if ifault not in [0, 2]: warnings.warn(str(ifault)) if N > 5000: warnings.warn("p-value may not be accurate for N > 5000.") if reta: return w, pw, a else: return w, pw # Values from Stephens, M A, "EDF Statistics for Goodness of Fit and # Some Comparisons", Journal of he American Statistical # Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737 _Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) _Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) # From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", # Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. _Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) # From Stephens, M A, "Tests of Fit for the Logistic Distribution Based # on the Empirical Distribution Function.", Biometrika, # Vol. 66, Issue 3, Dec. 1979, pp 591-595. _Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) def anderson(x, dist='norm'): """ Anderson-Darling test for data coming from a particular distribution The Anderson-Darling test is a modification of the Kolmogorov- Smirnov test `kstest` for the null hypothesis that a sample is drawn from a population that follows a particular distribution. For the Anderson-Darling test, the critical values depend on which distribution is being tested against. 
This function works for normal, exponential, logistic, or Gumbel (Extreme Value Type I) distributions. Parameters ---------- x : array_like array of sample data dist : {'norm','expon','logistic','gumbel','extreme1'}, optional the type of distribution to test against. The default is 'norm' and 'extreme1' is a synonym for 'gumbel' Returns ------- statistic : float The Anderson-Darling test statistic critical_values : list The critical values for this distribution significance_level : list The significance levels for the corresponding critical values in percents. The function returns critical values for a differing set of significance levels depending on the distribution that is being tested against. Notes ----- Critical values provided are for the following significance levels: normal/exponenential 15%, 10%, 5%, 2.5%, 1% logistic 25%, 10%, 5%, 2.5%, 1%, 0.5% Gumbel 25%, 10%, 5%, 2.5%, 1% If A2 is larger than these critical values then for the corresponding significance level, the null hypothesis that the data come from the chosen distribution can be rejected. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and Some Comparisons, Journal of the American Statistical Association, Vol. 69, pp. 730-737. .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, pp. 357-369. .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value Distribution, Biometrika, Vol. 64, pp. 583-588. .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference to Tests for Exponentiality , Technical Report No. 262, Department of Statistics, Stanford University, Stanford, CA. .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution Based on the Empirical Distribution Function, Biometrika, Vol. 66, pp. 591-595. """ if dist not in ['norm', 'expon', 'gumbel', 'extreme1', 'logistic']: raise ValueError("Invalid distribution; dist must be 'norm', " "'expon', 'gumbel', 'extreme1' or 'logistic'.") y = sort(x) xbar = np.mean(x, axis=0) N = len(y) if dist == 'norm': s = np.std(x, ddof=1, axis=0) w = (y - xbar) / s z = distributions.norm.cdf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3) elif dist == 'expon': w = y / xbar z = distributions.expon.cdf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_expon / (1.0 + 0.6/N), 3) elif dist == 'logistic': def rootfunc(ab, xj, N): a, b = ab tmp = (xj - a) / b tmp2 = exp(tmp) val = [sum(1.0/(1+tmp2), axis=0) - 0.5*N, sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N] return array(val) sol0 = array([xbar, np.std(x, ddof=1, axis=0)]) sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5) w = (y - sol[0]) / sol[1] z = distributions.logistic.cdf(w) sig = array([25, 10, 5, 2.5, 1, 0.5]) critical = around(_Avals_logistic / (1.0 + 0.25/N), 3) else: # (dist == 'gumbel') or (dist == 'extreme1'): xbar, s = distributions.gumbel_l.fit(x) w = (y - xbar) / s z = distributions.gumbel_l.cdf(w) sig = array([25, 10, 5, 2.5, 1]) critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) i = arange(1, N + 1) A2 = -N - sum((2*i - 1.0) / N * (log(z) + log(1 - z[::-1])), axis=0) AndersonResult = namedtuple('AndersonResult', ('statistic', 'critical_values', 'significance_level')) return AndersonResult(A2, critical, sig) def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N): """ Compute A2akN equation 7 of Scholz and Stephens. 
Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2aKN : float The A2aKN statistics of Scholz and Stephens 1987. """ A2akN = 0. Z_ssorted_left = Z.searchsorted(Zstar, 'left') if N == Zstar.size: lj = 1. else: lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left Bj = Z_ssorted_left + lj / 2. for i in arange(0, k): s = np.sort(samples[i]) s_ssorted_right = s.searchsorted(Zstar, side='right') Mij = s_ssorted_right.astype(np.float) fij = s_ssorted_right - s.searchsorted(Zstar, 'left') Mij -= fij / 2. inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.) A2akN += inner.sum() / n[i] A2akN *= (N - 1.) / N return A2akN def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): """ Compute A2akN equation 6 of Scholz & Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2KN : float The A2KN statistics of Scholz and Stephens 1987. """ A2kN = 0. lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], 'left') Bj = lj.cumsum() for i in arange(0, k): s = np.sort(samples[i]) Mij = s.searchsorted(Zstar[:-1], side='right') inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj)) A2kN += inner.sum() / n[i] return A2kN def anderson_ksamp(samples, midrank=True): """The Anderson-Darling test for k-samples. The k-sample Anderson-Darling test is a modification of the one-sample Anderson-Darling test. It tests the null hypothesis that k-samples are drawn from the same population without having to specify the distribution function of that population. The critical values depend on the number of samples. Parameters ---------- samples : sequence of 1-D array_like Array of sample data in arrays. midrank : bool, optional Type of Anderson-Darling test which is computed. Default (True) is the midrank test applicable to continuous and discrete populations. If False, the right side empirical distribution is used. Returns ------- statistic : float Normalized k-sample Anderson-Darling test statistic. critical_values : array The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%. significance_level : float An approximate significance level at which the null hypothesis for the provided samples can be rejected. Raises ------ ValueError If less than 2 samples are provided, a sample is empty, or no distinct observations are in the samples. See Also -------- ks_2samp : 2 sample Kolmogorov-Smirnov test anderson : 1 sample Anderson-Darling test Notes ----- [1]_ Defines three versions of the k-sample Anderson-Darling test: one for continuous distributions and two for discrete distributions, in which ties between samples may occur. The default of this routine is to compute the version based on the midrank empirical distribution function. This test is applicable to continuous and discrete data. If midrank is set to False, the right side empirical distribution is used for a test for discrete data. 
According to [1]_, the two discrete test statistics differ only slightly if a few collisions due to round-off errors occur in the test not adjusted for ties between samples. .. versionadded:: 0.14.0 References ---------- .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample Anderson-Darling Tests, Journal of the American Statistical Association, Vol. 82, pp. 918-924. Examples -------- >>> from scipy import stats >>> np.random.seed(314159) The null hypothesis that the two random samples come from the same distribution can be rejected at the 5% level because the returned test value is greater than the critical value for 5% (1.961) but not at the 2.5% level. The interpolation gives an approximate significance level of 3.1%: >>> stats.anderson_ksamp([np.random.normal(size=50), ... np.random.normal(loc=0.5, size=30)]) (2.4615796189876105, array([ 0.325, 1.226, 1.961, 2.718, 3.752]), 0.03134990135800783) The null hypothesis cannot be rejected for three samples from an identical distribution. The approximate p-value (87%) has to be computed by extrapolation and may not be very accurate: >>> stats.anderson_ksamp([np.random.normal(size=50), ... np.random.normal(size=30), np.random.normal(size=20)]) (-0.73091722665244196, array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]), 0.8789283903979661) """ k = len(samples) if (k < 2): raise ValueError("anderson_ksamp needs at least two samples") samples = list(map(np.asarray, samples)) Z = np.sort(np.hstack(samples)) N = Z.size Zstar = np.unique(Z) if Zstar.size < 2: raise ValueError("anderson_ksamp needs more than one distinct " "observation") n = np.array([sample.size for sample in samples]) if any(n == 0): raise ValueError("anderson_ksamp encountered sample without " "observations") if midrank: A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N) else: A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N) H = (1. / n).sum() hs_cs = (1. / arange(N - 1, 1, -1)).cumsum() h = hs_cs[-1] + 1 g = (hs_cs / arange(2, N)).sum() a = (4*g - 6) * (k - 1) + (10 - 6*g)*H b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6 c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h d = (2*h + 6)*k**2 - 4*h*k sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.)) m = k - 1 A2 = (A2kN - m) / math.sqrt(sigmasq) # The b_i values are the interpolation coefficients from Table 2 # of Scholz and Stephens 1987 b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326]) b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822]) b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396]) critical = b0 + b1 / math.sqrt(m) + b2 / m pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2) if A2 < critical.min() or A2 > critical.max(): warnings.warn("approximate p-value will be computed by extrapolation") p = math.exp(np.polyval(pf, A2)) Anderson_ksampResult = namedtuple('Anderson_ksampResult', ('statistic', 'critical_values', 'significance_level')) return Anderson_ksampResult(A2, critical, p) def ansari(x, y): """ Perform the Ansari-Bradley test for equal scale parameters The Ansari-Bradley test is a non-parametric test for the equality of the scale parameter of the distributions from which two samples were drawn. 
Parameters ---------- x, y : array_like arrays of sample data Returns ------- statistic : float The Ansari-Bradley test statistic pvalue : float The p-value of the hypothesis test See Also -------- fligner : A non-parametric test for the equality of k variances mood : A non-parametric test for the equality of two scale parameters Notes ----- The p-value given is exact when the sample sizes are both less than 55 and there are no ties, otherwise a normal approximation for the p-value is used. References ---------- .. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2. """ x, y = asarray(x), asarray(y) n = len(x) m = len(y) if m < 1: raise ValueError("Not enough other observations.") if n < 1: raise ValueError("Not enough test observations.") AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue')) N = m + n xy = r_[x, y] # combine rank = stats.rankdata(xy) symrank = amin(array((rank, N - rank + 1)), 0) AB = sum(symrank[:n], axis=0) uxy = unique(xy) repeats = (len(uxy) != len(xy)) exact = ((m < 55) and (n < 55) and not repeats) if repeats and (m < 55 or n < 55): warnings.warn("Ties preclude use of exact statistic.") if exact: astart, a1, ifault = statlib.gscale(n, m) ind = AB - astart total = sum(a1, axis=0) if ind < len(a1)/2.0: cind = int(ceil(ind)) if ind == cind: pval = 2.0 * sum(a1[:cind+1], axis=0) / total else: pval = 2.0 * sum(a1[:cind], axis=0) / total else: find = int(floor(ind)) if ind == floor(ind): pval = 2.0 * sum(a1[find:], axis=0) / total else: pval = 2.0 * sum(a1[find+1:], axis=0) / total return AnsariResult(AB, min(1.0, pval)) # otherwise compute normal approximation if N % 2: # N odd mnAB = n * (N+1.0)**2 / 4.0 / N varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2) else: mnAB = n * (N+2.0) / 4.0 varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0) if repeats: # adjust variance estimates # compute sum(tj * rj**2,axis=0) fac = sum(symrank**2, axis=0) if N % 2: # N odd varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1)) else: # N even varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1)) z = (AB - mnAB) / sqrt(varAB) pval = distributions.norm.sf(abs(z)) * 2.0 return AnsariResult(AB, pval) def bartlett(*args): """ Perform Bartlett's test for equal variances Bartlett's test tests the null hypothesis that all input samples are from populations with equal variances. For samples from significantly non-normal populations, Levene's test `levene` is more robust. Parameters ---------- sample1, sample2,... : array_like arrays of sample data. May be different lengths. Returns ------- statistic : float The test statistic. pvalue : float The p-value of the test. See Also -------- fligner : A non-parametric test for the equality of k variances levene : A robust parametric test for equality of k variances Notes ----- Conover et al. (1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power [3]_. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. 
Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical Tests. Proceedings of the Royal Society of London. Series A, Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282. """ BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue')) # Handle empty input for a in args: if np.asanyarray(a).size == 0: return BartlettResult(np.nan, np.nan) k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) ssq = zeros(k, 'd') for j in range(k): Ni[j] = len(args[j]) ssq[j] = np.var(args[j], ddof=1) Ntot = sum(Ni, axis=0) spsq = sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k)) numer = (Ntot*1.0 - k) * log(spsq) - sum((Ni - 1.0)*log(ssq), axis=0) denom = 1.0 + 1.0/(3*(k - 1)) * ((sum(1.0/(Ni - 1.0), axis=0)) - 1.0/(Ntot - k)) T = numer / denom pval = distributions.chi2.sf(T, k - 1) # 1 - cdf return BartlettResult(T, pval) def levene(*args, **kwds): """ Perform Levene test for equal variances. The Levene test tests the null hypothesis that all input samples are from populations with equal variances. Levene's test is an alternative to Bartlett's test `bartlett` in the case where there are significant deviations from normality. Parameters ---------- sample1, sample2, ... : array_like The sample data, possibly with different lengths center : {'mean', 'median', 'trimmed'}, optional Which function of the data to use in the test. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the test. Notes ----- Three variations of Levene's test are possible. The possibilities and their recommended usages are: * 'median' : Recommended for skewed (non-normal) distributions> * 'mean' : Recommended for symmetric, moderate-tailed distributions. * 'trimmed' : Recommended for heavy-tailed distributions. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm .. [2] Levene, H. (1960). In Contributions to Probability and Statistics: Essays in Honor of Harold Hotelling, I. Olkin et al. eds., Stanford University Press, pp. 278-292. .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American Statistical Association, 69, 364-367 """ # Handle keyword arguments. 
center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("levene() got an unexpected keyword " "argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) Yci = zeros(k, 'd') if center not in ['mean', 'median', 'trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median'" + "or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(np.sort(arg), proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) for j in range(k): Ni[j] = len(args[j]) Yci[j] = func(args[j]) Ntot = sum(Ni, axis=0) # compute Zij's Zij = [None] * k for i in range(k): Zij[i] = abs(asarray(args[i]) - Yci[i]) # compute Zbari Zbari = zeros(k, 'd') Zbar = 0.0 for i in range(k): Zbari[i] = np.mean(Zij[i], axis=0) Zbar += Zbari[i] * Ni[i] Zbar /= Ntot numer = (Ntot - k) * sum(Ni * (Zbari - Zbar)**2, axis=0) # compute denom_variance dvar = 0.0 for i in range(k): dvar += sum((Zij[i] - Zbari[i])**2, axis=0) denom = (k - 1.0) * dvar W = numer / denom pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue')) return LeveneResult(W, pval) @setastest(False) def binom_test(x, n=None, p=0.5, alternative='two-sided'): """ Perform a test that the probability of success is p. This is an exact, two-sided test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Parameters ---------- x : integer or array_like the number of successes, or if x has length 2, it is the number of successes and the number of failures. n : integer the number of trials. This is ignored if x gives both the number of successes and failures p : float, optional The hypothesized probability of success. 0 <= p <= 1. The default value is p = 0.5 Returns ------- p-value : float The p-value of the hypothesis test References ---------- .. [1] http://en.wikipedia.org/wiki/Binomial_test """ x = atleast_1d(x).astype(np.integer) if len(x) == 2: n = x[1] + x[0] x = x[0] elif len(x) == 1: x = x[0] if n is None or n < x: raise ValueError("n must be >= x") n = np.int_(n) else: raise ValueError("Incorrect length for x.") if (p > 1.0) or (p < 0.0): raise ValueError("p must be in range [0,1]") if alternative not in ('two-sided', 'less', 'greater'): raise ValueError("alternative not recognized\n" "should be 'two-sided', 'less' or 'greater'") if alternative == 'less': pval = distributions.binom.cdf(x, n, p) return pval if alternative == 'greater': pval = distributions.binom.sf(x-1, n, p) return pval # if alternative was neither 'less' nor 'greater', then it's 'two-sided' d = distributions.binom.pmf(x, n, p) rerr = 1 + 1e-7 if x == p * n: # special case as shortcut, would also be handled by `else` below pval = 1. 
elif x < p * n: i = np.arange(np.ceil(p * n), n+1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(x, n, p) + distributions.binom.sf(n - y, n, p)) else: i = np.arange(np.floor(p*n) + 1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(y-1, n, p) + distributions.binom.sf(x-1, n, p)) return min(1.0, pval) def _apply_func(x, g, func): # g is list of indices into x # separating x into different groups # func should be applied over the groups g = unique(r_[0, g, len(x)]) output = [] for k in range(len(g) - 1): output.append(func(x[g[k]:g[k+1]])) return asarray(output) def fligner(*args, **kwds): """ Perform Fligner-Killeen test for equality of variance. Fligner's test tests the null hypothesis that all input samples are from populations with equal variances. Fligner-Killeen's test is distribution free when populations are identical [2]_. Parameters ---------- sample1, sample2, ... : array_like Arrays of sample data. Need not be the same length. center : {'mean', 'median', 'trimmed'}, optional Keyword argument controlling which function of the data is used in computing the test statistic. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the hypothesis test. See Also -------- bartlett : A parametric test for equality of k variances in normal samples levene : A robust parametric test for equality of k variances Notes ----- As with Levene's test there are three variants of Fligner's test that differ by the measure of central tendency used in the test. See `levene` for more information. Conover et al. (1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power [3]_. References ---------- .. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample tests for scale. 'Journal of the American Statistical Association.' 71(353), 210-213. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A comparative study of tests for homogeneity of variances, with applications to the outer continental shelf biding data. Technometrics, 23(4), 351-361. """ FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue')) # Handle empty input for a in args: if np.asanyarray(a).size == 0: return FlignerResult(np.nan, np.nan) # Handle keyword arguments. 
center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("fligner() got an unexpected keyword " "argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") if center not in ['mean', 'median', 'trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median'" + "or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) Ni = asarray([len(args[j]) for j in range(k)]) Yci = asarray([func(args[j]) for j in range(k)]) Ntot = sum(Ni, axis=0) # compute Zij's Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)] allZij = [] g = [0] for i in range(k): allZij.extend(list(Zij[i])) g.append(len(allZij)) ranks = stats.rankdata(allZij) a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5) # compute Aibar Aibar = _apply_func(a, g, sum) / Ni anbar = np.mean(a, axis=0) varsq = np.var(a, axis=0, ddof=1) Xsq = sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf return FlignerResult(Xsq, pval) def mood(x, y, axis=0): """ Perform Mood's test for equal scale parameters. Mood's two-sample test for scale parameters is a non-parametric test for the null hypothesis that two samples are drawn from the same distribution with the same scale parameter. Parameters ---------- x, y : array_like Arrays of sample data. axis : int, optional The axis along which the samples are tested. `x` and `y` can be of different length along `axis`. If `axis` is None, `x` and `y` are flattened and the test is done on all values in the flattened arrays. Returns ------- z : scalar or ndarray The z-score for the hypothesis test. For 1-D inputs a scalar is returned. p-value : scalar ndarray The p-value for the hypothesis test. See Also -------- fligner : A non-parametric test for the equality of k variances ansari : A non-parametric test for the equality of 2 variances bartlett : A parametric test for equality of k variances in normal samples levene : A parametric test for equality of k variances Notes ----- The data are assumed to be drawn from probability distributions ``f(x)`` and ``f(x/s) / s`` respectively, for some probability density function f. The null hypothesis is that ``s == 1``. For multi-dimensional arrays, if the inputs are of shapes ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the resulting z and p values will have shape ``(n0, n2, n3)``. Note that ``n1`` and ``m1`` don't have to be equal, but the other dimensions do. 
Examples -------- >>> from scipy import stats >>> x2 = np.random.randn(2, 45, 6, 7) >>> x1 = np.random.randn(2, 30, 6, 7) >>> z, p = stats.mood(x1, x2, axis=1) >>> p.shape (2, 6, 7) Find the number of points where the difference in scale is not significant: >>> (p > 0.1).sum() 74 Perform the test with different scales: >>> x1 = np.random.randn(2, 30) >>> x2 = np.random.randn(2, 35) * 10.0 >>> stats.mood(x1, x2, axis=1) (array([-5.84332354, -5.6840814 ]), array([5.11694980e-09, 1.31517628e-08])) """ x = np.asarray(x, dtype=float) y = np.asarray(y, dtype=float) if axis is None: x = x.flatten() y = y.flatten() axis = 0 # Determine shape of the result arrays res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis]) if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if ax != axis])): raise ValueError("Dimensions of x and y on all axes except `axis` " "should match") n = x.shape[axis] m = y.shape[axis] N = m + n if N < 3: raise ValueError("Not enough observations.") xy = np.concatenate((x, y), axis=axis) if axis != 0: xy = np.rollaxis(xy, axis) xy = xy.reshape(xy.shape[0], -1) # Generalized to the n-dimensional case by adding the axis argument, and # using for loops, since rankdata is not vectorized. For improving # performance consider vectorizing rankdata function. all_ranks = np.zeros_like(xy) for j in range(xy.shape[1]): all_ranks[:, j] = stats.rankdata(xy[:, j]) Ri = all_ranks[:n] M = sum((Ri - (N + 1.0) / 2)**2, axis=0) # Approx stat. mnM = n * (N * N - 1.0) / 12 varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180 z = (M - mnM) / sqrt(varM) # sf for right tail, cdf for left tail. Factor 2 for two-sidedness z_pos = z > 0 pval = np.zeros_like(z) pval[z_pos] = 2 * distributions.norm.sf(z[z_pos]) pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos]) if res_shape == (): # Return scalars, not 0-D arrays z = z[0] pval = pval[0] else: z.shape = res_shape pval.shape = res_shape return z, pval def wilcoxon(x, y=None, zero_method="wilcox", correction=False): """ Calculate the Wilcoxon signed-rank test. The Wilcoxon signed-rank test tests the null hypothesis that two related paired samples come from the same distribution. In particular, it tests whether the distribution of the differences x - y is symmetric about zero. It is a non-parametric version of the paired T-test. Parameters ---------- x : array_like The first set of measurements. y : array_like, optional The second set of measurements. If `y` is not given, then the `x` array is considered to be the differences between the two sets of measurements. zero_method : string, {"pratt", "wilcox", "zsplit"}, optional "pratt": Pratt treatment: includes zero-differences in the ranking process (more conservative) "wilcox": Wilcox treatment: discards all zero-differences "zsplit": Zero rank split: just like Pratt, but spliting the zero rank between positive and negative ones correction : bool, optional If True, apply continuity correction by adjusting the Wilcoxon rank statistic by 0.5 towards the mean value when computing the z-statistic. Default is False. Returns ------- statistic : float The sum of the ranks of the differences above or below zero, whichever is smaller. pvalue : float The two-sided p-value for the test. Notes ----- Because the normal approximation is used for the calculations, the samples used should be large. A typical rule is to require that n > 20. References ---------- .. 
[1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test """ if zero_method not in ["wilcox", "pratt", "zsplit"]: raise ValueError("Zero method should be either 'wilcox' " "or 'pratt' or 'zsplit'") if y is None: d = x else: x, y = map(asarray, (x, y)) if len(x) != len(y): raise ValueError('Unequal N in wilcoxon. Aborting.') d = x - y if zero_method == "wilcox": # Keep all non-zero differences d = compress(np.not_equal(d, 0), d, axis=-1) count = len(d) if count < 10: warnings.warn("Warning: sample size too small for normal approximation.") r = stats.rankdata(abs(d)) r_plus = sum((d > 0) * r, axis=0) r_minus = sum((d < 0) * r, axis=0) if zero_method == "zsplit": r_zero = sum((d == 0) * r, axis=0) r_plus += r_zero / 2. r_minus += r_zero / 2. T = min(r_plus, r_minus) mn = count * (count + 1.) * 0.25 se = count * (count + 1.) * (2. * count + 1.) if zero_method == "pratt": r = r[d != 0] replist, repnum = find_repeats(r) if repnum.size != 0: # Correction for repeated elements. se -= 0.5 * (repnum * (repnum * repnum - 1)).sum() se = sqrt(se / 24) correction = 0.5 * int(bool(correction)) * np.sign(T - mn) z = (T - mn - correction) / se prob = 2. * distributions.norm.sf(abs(z)) WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue')) return WilcoxonResult(T, prob) @setastest(False) def median_test(*args, **kwds): """ Mood's median test. Test that two or more samples come from populations with the same median. Let ``n = len(args)`` be the number of samples. The "grand median" of all the data is computed, and a contingency table is formed by classifying the values in each sample as being above or below the grand median. The contingency table, along with `correction` and `lambda_`, are passed to `scipy.stats.chi2_contingency` to compute the test statistic and p-value. Parameters ---------- sample1, sample2, ... : array_like The set of samples. There must be at least two samples. Each sample must be a one-dimensional sequence containing at least one value. The samples are not required to have the same length. ties : str, optional Determines how values equal to the grand median are classified in the contingency table. The string must be one of:: "below": Values equal to the grand median are counted as "below". "above": Values equal to the grand median are counted as "above". "ignore": Values equal to the grand median are not counted. The default is "below". correction : bool, optional If True, *and* there are just two samples, apply Yates' correction for continuity when computing the test statistic associated with the contingency table. Default is True. lambda_ : float or str, optional. By default, the statistic computed in this test is Pearson's chi-squared statistic. `lambda_` allows a statistic from the Cressie-Read power divergence family to be used instead. See `power_divergence` for details. Default is 1 (Pearson's chi-squared statistic). Returns ------- stat : float The test statistic. The statistic that is returned is determined by `lambda_`. The default is Pearson's chi-squared statistic. p : float The p-value of the test. m : float The grand median. table : ndarray The contingency table. The shape of the table is (2, n), where n is the number of samples. The first row holds the counts of the values above the grand median, and the second row holds the counts of the values below the grand median. The table allows further analysis with, for example, `scipy.stats.chi2_contingency`, or with `scipy.stats.fisher_exact` if there are two samples, without having to recompute the table. 
See Also -------- kruskal : Compute the Kruskal-Wallis H-test for independent samples. mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y. Notes ----- .. versionadded:: 0.15.0 References ---------- .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill (1950), pp. 394-399. .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010). See Sections 8.12 and 10.15. Examples -------- A biologist runs an experiment in which there are three groups of plants. Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants. Each plant produces a number of seeds. The seed counts for each group are:: Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49 Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99 Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84 The following code applies Mood's median test to these samples. >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49] >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99] >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84] >>> from scipy.stats import median_test >>> stat, p, med, tbl = median_test(g1, g2, g3) The median is >>> med 34.0 and the contingency table is >>> tbl array([[ 5, 10, 7], [11, 5, 10]]) `p` is too large to conclude that the medians are not the same: >>> p 0.12609082774093244 The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to `median_test`. >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood") >>> p 0.12224779737117837 The median occurs several times in the data, so we'll get a different result if, for example, ``ties="above"`` is used: >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above") >>> p 0.063873276069553273 >>> tbl array([[ 5, 11, 9], [11, 4, 8]]) This example demonstrates that if the data set is not large and there are values equal to the median, the p-value can be sensitive to the choice of `ties`. """ ties = kwds.pop('ties', 'below') correction = kwds.pop('correction', True) lambda_ = kwds.pop('lambda_', None) if len(kwds) > 0: bad_kwd = kwds.keys()[0] raise TypeError("median_test() got an unexpected keyword " "argument %r" % bad_kwd) if len(args) < 2: raise ValueError('median_test requires two or more samples.') ties_options = ['below', 'above', 'ignore'] if ties not in ties_options: raise ValueError("invalid 'ties' option '%s'; 'ties' must be one " "of: %s" % (ties, str(ties_options)[1:-1])) data = [np.asarray(arg) for arg in args] # Validate the sizes and shapes of the arguments. for k, d in enumerate(data): if d.size == 0: raise ValueError("Sample %d is empty. All samples must " "contain at least one value." % (k + 1)) if d.ndim != 1: raise ValueError("Sample %d has %d dimensions. All " "samples must be one-dimensional sequences." % (k + 1, d.ndim)) grand_median = np.median(np.concatenate(data)) # Create the contingency table. table = np.zeros((2, len(data)), dtype=np.int64) for k, sample in enumerate(data): nabove = count_nonzero(sample > grand_median) nbelow = count_nonzero(sample < grand_median) nequal = sample.size - (nabove + nbelow) table[0, k] += nabove table[1, k] += nbelow if ties == "below": table[1, k] += nequal elif ties == "above": table[0, k] += nequal # Check that no row or column of the table is all zero. # Such a table can not be given to chi2_contingency, because it would have # a zero in the table of expected frequencies. 
rowsums = table.sum(axis=1) if rowsums[0] == 0: raise ValueError("All values are below the grand median (%r)." % grand_median) if rowsums[1] == 0: raise ValueError("All values are above the grand median (%r)." % grand_median) if ties == "ignore": # We already checked that each sample has at least one value, but it # is possible that all those values equal the grand median. If `ties` # is "ignore", that would result in a column of zeros in `table`. We # check for that case here. zero_cols = np.where((table == 0).all(axis=0))[0] if len(zero_cols) > 0: msg = ("All values in sample %d are equal to the grand " "median (%r), so they are ignored, resulting in an " "empty sample." % (zero_cols[0] + 1, grand_median)) raise ValueError(msg) stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_, correction=correction) return stat, p, grand_median, table def _hermnorm(N): # return the negatively normalized hermite polynomials up to order N-1 # (inclusive) # using the recursive relationship # p_n+1 = p_n(x)' - x*p_n(x) # and p_0(x) = 1 plist = [None] * N plist[0] = poly1d(1) for n in range(1, N): plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1] return plist # Note: when removing pdf_fromgamma, also remove the _hermnorm support function @np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 " "in favour of statsmodels.distributions.ExpandedNormal.") def pdf_fromgamma(g1, g2, g3=0.0, g4=None): if g4 is None: g4 = 3 * g2**2 sigsq = 1.0 / g2 sig = sqrt(sigsq) mu = g1 * sig**3.0 p12 = _hermnorm(13) for k in range(13): p12[k] /= sig**k # Add all of the terms to polynomial totp = (p12[0] - g1/6.0*p12[3] + g2/24.0*p12[4] + g1**2/72.0 * p12[6] - g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] + g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] + g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12]) # Final normalization totp = totp / sqrt(2*pi) / sig def thefunc(x): xn = (x - mu) / sig return totp(xn) * exp(-xn**2 / 2.) return thefunc def _circfuncs_common(samples, high, low): samples = np.asarray(samples) if samples.size == 0: return np.nan, np.nan ang = (samples - low)*2*pi / (high - low) return samples, ang def circmean(samples, high=2*pi, low=0, axis=None): """ Compute the circular mean for samples in a range. Parameters ---------- samples : array_like Input array. high : float or int, optional High boundary for circular mean range. Default is ``2*pi``. low : float or int, optional Low boundary for circular mean range. Default is 0. axis : int, optional Axis along which means are computed. The default is to compute the mean of the flattened array. Returns ------- circmean : float Circular mean. """ samples, ang = _circfuncs_common(samples, high, low) res = angle(np.mean(exp(1j * ang), axis=axis)) mask = res < 0 if mask.ndim > 0: res[mask] += 2*pi elif mask: res += 2*pi return res*(high - low)/2.0/pi + low def circvar(samples, high=2*pi, low=0, axis=None): """ Compute the circular variance for samples assumed to be in a range Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular variance range. Default is 0. high : float or int, optional High boundary for circular variance range. Default is ``2*pi``. axis : int, optional Axis along which variances are computed. The default is to compute the variance of the flattened array. Returns ------- circvar : float Circular variance. 
Notes ----- This uses a definition of circular variance that in the limit of small angles returns a number close to the 'linear' variance. """ samples, ang = _circfuncs_common(samples, high, low) res = np.mean(exp(1j * ang), axis=axis) R = abs(res) return ((high - low)/2.0/pi)**2 * 2 * log(1/R) def circstd(samples, high=2*pi, low=0, axis=None): """ Compute the circular standard deviation for samples assumed to be in the range [low to high]. Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular standard deviation range. Default is 0. high : float or int, optional High boundary for circular standard deviation range. Default is ``2*pi``. axis : int, optional Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array. Returns ------- circstd : float Circular standard deviation. Notes ----- This uses a definition of circular standard deviation that in the limit of small angles returns a number close to the 'linear' standard deviation. """ samples, ang = _circfuncs_common(samples, high, low) res = np.mean(exp(1j * ang), axis=axis) R = abs(res) return ((high - low)/2.0/pi) * sqrt(-2*log(R)) # Tests to include (from R) -- some of these already in stats. ######## # X Ansari-Bradley # X Bartlett (and Levene) # X Binomial # Y Pearson's Chi-squared (stats.chisquare) # Y Association Between Paired samples (stats.pearsonr, stats.spearmanr) # stats.kendalltau) -- these need work though # Fisher's exact test # X Fligner-Killeen Test # Y Friedman Rank Sum (stats.friedmanchisquare?) # Y Kruskal-Wallis # Y Kolmogorov-Smirnov # Cochran-Mantel-Haenszel Chi-Squared for Count # McNemar's Chi-squared for Count # X Mood Two-Sample # X Test For Equal Means in One-Way Layout (see stats.ttest also) # Pairwise Comparisons of proportions # Pairwise t tests # Tabulate p values for pairwise comparisons # Pairwise Wilcoxon rank sum tests # Power calculations two sample test of prop. # Power calculations for one and two sample t tests # Equal or Given Proportions # Trend in Proportions # Quade Test # Y Student's T Test # Y F Test to compare two variances # XY Wilcoxon Rank Sum and Signed Rank Tests
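# Illustrative usage sketch (not part of the original module): a minimal,
# hedged example of how the hypothesis tests defined above might be called on
# synthetic data. The sample sizes, scales and random seed are arbitrary
# assumptions chosen purely for demonstration.
if __name__ == '__main__':
    import numpy as np

    rng = np.random.RandomState(0)
    a = rng.normal(loc=0.0, scale=1.0, size=200)
    b = rng.normal(loc=0.0, scale=1.5, size=200)

    # One-sample Anderson-Darling test: the statistic is compared against the
    # critical values returned for the chosen distribution.
    print(anderson(a, dist='norm'))

    # k-sample Anderson-Darling test for a common (unspecified) parent
    # distribution.
    print(anderson_ksamp([a, b]))

    # Three tests for equality of scale/variance with different parametric /
    # robustness trade-offs (see the respective docstrings above).
    print(bartlett(a, b))
    print(levene(a, b, center='median'))
    print(fligner(a, b))

    # Paired-sample Wilcoxon signed-rank test on the differences a - b.
    print(wilcoxon(a, b))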
bsd-3-clause
advancedplotting/aplot
docs/cmap.py
1
1298
""" Generates colormap documentation. """ from matplotlib import pyplot as plt import numpy as np from collections import OrderedDict MAPS = OrderedDict([ ('Rainbow', 'jet'), ('Hot and Cold', 'seismic'), ('White to Black', 'binary'), ('White to Blue', 'Blues'), ('White to Green', 'Greens'), ('White to Red', 'Reds'), ('Fire', 'hot'), ('Water', 'ocean'), ('Earth', 'gist_earth'), ('Air', 'cool'), ('Pastel', 'Accent'), ('Banded', 'Paired'), ('Stepped', 'prism') ]) def plot_color_gradients(): fig, axes = plt.subplots(nrows=len(MAPS)) fig.subplots_adjust(top=0.99, bottom=0.01, left=0.01, right=0.79) for ax, name in zip(axes, MAPS.keys()): ax.imshow(gradient, aspect='auto', cmap=MAPS[name]) pos = list(ax.get_position().bounds) x_text = pos[2] + 0.02 y_text = pos[1] + pos[3]/2. fig.text(x_text, y_text, name, va='center', ha='left', fontsize=14) # Turn off *all* ticks & spines, not just the ones with colormaps. for ax in axes: ax.set_axis_off() gradient = np.linspace(0, 1, 256) gradient = np.vstack((gradient, gradient)) plot_color_gradients() plt.show()
bsd-3-clause
raghavrv/scikit-learn
sklearn/covariance/tests/test_robust_covariance.py
17
5467
# Author: Alexandre Gramfort <[email protected]>
#         Gael Varoquaux <[email protected]>
#         Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause

import itertools

import numpy as np

from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
    EllipticEnvelope
from sklearn.covariance import fast_mcd

X = datasets.load_iris().data
X_1d = X[:, 0]

n_samples, n_features = X.shape


def test_mcd():
    # Tests the FastMCD algorithm implementation
    # Small data set
    # test without outliers (random independent normal data)
    launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
    # test with a contaminated data set (medium contamination)
    launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
    # test with a contaminated data set (strong contamination)
    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)

    # Medium data set
    launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)

    # Large data set
    launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)

    # 1D data set
    launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)


def test_fast_mcd_on_invalid_input():
    X = np.arange(100)
    assert_raise_message(ValueError,
                         'Expected 2D array, got 1D array instead',
                         fast_mcd, X)


def test_mcd_class_on_invalid_input():
    X = np.arange(100)
    mcd = MinCovDet()
    assert_raise_message(ValueError,
                         'Expected 2D array, got 1D array instead',
                         mcd.fit, X)


def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False

    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)


def test_mcd_issue1127():
    # Check that the code does not break with X.shape = (3, 1)
    # (i.e. n_support = n_samples)
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(3, 1))
    mcd = MinCovDet()
    mcd.fit(X)


def test_mcd_issue3367():
    # Check that MCD completes when the covariance matrix is singular
    # i.e. one of the rows and columns are all zeros
    rand_gen = np.random.RandomState(0)

    # Think of these as the values for X and Y -> 10 values between -5 and 5
    data_values = np.linspace(-5, 5, 10).tolist()
    # Get the cartesian product of all possible coordinate pairs from above set
    data = np.array(list(itertools.product(data_values, data_values)))

    # Add a third column that's all zeros to make our data a set of points
    # within a plane, which means that the covariance matrix will be singular
    data = np.hstack((data, np.zeros((data.shape[0], 1))))

    # The below line of code should raise an exception if the covariance matrix
    # is singular. As a further test, since we have points in XYZ, the
    # principal components (Eigenvectors) of these directly relate to the
    # geometry of the points. Since it's a plane, we should be able to test
    # that the Eigenvector that corresponds to the smallest Eigenvalue is the
    # plane normal, specifically [0, 0, 1], since everything is in the XY plane
    # (as I've set it up above). To do this one would start by:
    #
    # evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
    # normal = evecs[:, np.argmin(evals)]
    #
    # After which we need to assert that our `normal` is equal to [0, 0, 1].
    # Do note that there is floating point error associated with this, so it's
    # best to subtract the two and then compare some small tolerance (e.g.
    # 1e-12).
    MinCovDet(random_state=rand_gen).fit(data)


def test_outlier_detection():
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)

    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
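# Illustrative sketch (not one of the original tests): a minimal, hedged
# example of how MinCovDet is typically used to obtain robust location and
# covariance estimates on contaminated data. The sample sizes, outlier shift
# and thresholds below are arbitrary assumptions for demonstration only.
def example_mincovdet_usage():
    rng = np.random.RandomState(42)
    data = rng.randn(200, 3)
    # Shift the first 20 rows far away from the bulk to act as outliers.
    data[:20] += 10.
    mcd = MinCovDet(random_state=rng).fit(data)
    # The robust location estimate should remain close to the inlier mean
    # (zero) despite the contamination; these are loose sanity checks.
    assert np.all(np.abs(mcd.location_) < 1.)
    assert mcd.support_.sum() > data.shape[0] // 2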
bsd-3-clause
zfrenchee/pandas
pandas/tests/indexes/test_base.py
1
86680
# -*- coding: utf-8 -*- import pytest from datetime import datetime, timedelta from collections import defaultdict import pandas.util.testing as tm from pandas.core.dtypes.common import is_unsigned_integer_dtype from pandas.core.indexes.api import Index, MultiIndex from pandas.tests.indexes.common import Base from pandas.compat import (range, lrange, lzip, u, text_type, zip, PY3, PY36, PYPY) import operator import numpy as np from pandas import (period_range, date_range, Series, DataFrame, Float64Index, Int64Index, UInt64Index, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, isna) from pandas.core.index import _get_combined_index, _ensure_index_from_sequences from pandas.util.testing import assert_almost_equal from pandas.compat.numpy import np_datetime64_compat import pandas.core.config as cf from pandas.core.indexes.datetimes import _to_m8 import pandas as pd from pandas._libs.lib import Timestamp class TestIndex(Base): _holder = Index def setup_method(self, method): self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100), strIndex=tm.makeStringIndex(100), dateIndex=tm.makeDateIndex(100), periodIndex=tm.makePeriodIndex(100), tdIndex=tm.makeTimedeltaIndex(100), intIndex=tm.makeIntIndex(100), uintIndex=tm.makeUIntIndex(100), rangeIndex=tm.makeIntIndex(100), floatIndex=tm.makeFloatIndex(100), boolIndex=Index([True, False]), catIndex=tm.makeCategoricalIndex(100), empty=Index([]), tuples=MultiIndex.from_tuples(lzip( ['foo', 'bar', 'baz'], [1, 2, 3])), repeats=Index([0, 0, 1, 1, 2, 2])) self.setup_indices() def create_index(self): return Index(list('abcde')) def test_new_axis(self): new_index = self.dateIndex[None, :] assert new_index.ndim == 2 assert isinstance(new_index, np.ndarray) def test_copy_and_deepcopy(self, indices): super(TestIndex, self).test_copy_and_deepcopy(indices) new_copy2 = self.intIndex.copy(dtype=int) assert new_copy2.dtype.kind == 'i' def test_constructor(self): # regular instance creation tm.assert_contains_all(self.strIndex, self.strIndex) tm.assert_contains_all(self.dateIndex, self.dateIndex) # casting arr = np.array(self.strIndex) index = Index(arr) tm.assert_contains_all(arr, index) tm.assert_index_equal(self.strIndex, index) # copy arr = np.array(self.strIndex) index = Index(arr, copy=True, name='name') assert isinstance(index, Index) assert index.name == 'name' tm.assert_numpy_array_equal(arr, index.values) arr[0] = "SOMEBIGLONGSTRING" assert index[0] != "SOMEBIGLONGSTRING" # what to do here? # arr = np.array(5.) # pytest.raises(Exception, arr.view, Index) def test_constructor_corner(self): # corner case pytest.raises(TypeError, Index, 0) def test_construction_list_mixed_tuples(self): # see gh-10697: if we are constructing from a mixed list of tuples, # make sure that we are independent of the sorting order. 
idx1 = Index([('A', 1), 'B']) assert isinstance(idx1, Index) assert not isinstance(idx1, MultiIndex) idx2 = Index(['B', ('A', 1)]) assert isinstance(idx2, Index) assert not isinstance(idx2, MultiIndex) @pytest.mark.parametrize('na_value', [None, np.nan]) @pytest.mark.parametrize('vtype', [list, tuple, iter]) def test_construction_list_tuples_nan(self, na_value, vtype): # GH 18505 : valid tuples containing NaN values = [(1, 'two'), (3., na_value)] result = Index(vtype(values)) expected = MultiIndex.from_tuples(values) tm.assert_index_equal(result, expected) def test_constructor_from_index_datetimetz(self): idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3, tz='US/Eastern') result = pd.Index(idx) tm.assert_index_equal(result, idx) assert result.tz == idx.tz result = pd.Index(idx.astype(object)) tm.assert_index_equal(result, idx) assert result.tz == idx.tz def test_constructor_from_index_timedelta(self): idx = pd.timedelta_range('1 days', freq='D', periods=3) result = pd.Index(idx) tm.assert_index_equal(result, idx) result = pd.Index(idx.astype(object)) tm.assert_index_equal(result, idx) def test_constructor_from_index_period(self): idx = pd.period_range('2015-01-01', freq='D', periods=3) result = pd.Index(idx) tm.assert_index_equal(result, idx) result = pd.Index(idx.astype(object)) tm.assert_index_equal(result, idx) def test_constructor_from_series_datetimetz(self): idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3, tz='US/Eastern') result = pd.Index(pd.Series(idx)) tm.assert_index_equal(result, idx) assert result.tz == idx.tz def test_constructor_from_series_timedelta(self): idx = pd.timedelta_range('1 days', freq='D', periods=3) result = pd.Index(pd.Series(idx)) tm.assert_index_equal(result, idx) def test_constructor_from_series_period(self): idx = pd.period_range('2015-01-01', freq='D', periods=3) result = pd.Index(pd.Series(idx)) tm.assert_index_equal(result, idx) def test_constructor_from_series(self): expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')]) s = Series([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')]) result = Index(s) tm.assert_index_equal(result, expected) result = DatetimeIndex(s) tm.assert_index_equal(result, expected) # GH 6273 # create from a series, passing a freq s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'])) result = DatetimeIndex(s, freq='MS') expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'], freq='MS') tm.assert_index_equal(result, expected) df = pd.DataFrame(np.random.rand(5, 3)) df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'] result = DatetimeIndex(df['date'], freq='MS') expected.name = 'date' tm.assert_index_equal(result, expected) assert df['date'].dtype == object exp = pd.Series(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'], name='date') tm.assert_series_equal(df['date'], exp) # GH 6274 # infer freq of same result = pd.infer_freq(df['date']) assert result == 'MS' def test_constructor_ndarray_like(self): # GH 5460#issuecomment-44474502 # it should be possible to convert any object that satisfies the numpy # ndarray interface directly into an Index class ArrayLike(object): def __init__(self, array): self.array = array def __array__(self, dtype=None): return self.array for array in [np.arange(5), np.array(['a', 'b', 'c']), date_range('2000-01-01', periods=3).values]: expected = pd.Index(array) result = pd.Index(ArrayLike(array)) tm.assert_index_equal(result, 
expected) @pytest.mark.parametrize('dtype', [ int, 'int64', 'int32', 'int16', 'int8', 'uint64', 'uint32', 'uint16', 'uint8']) def test_constructor_int_dtype_float(self, dtype): # GH 18400 if is_unsigned_integer_dtype(dtype): index_type = UInt64Index else: index_type = Int64Index expected = index_type([0, 1, 2, 3]) result = Index([0., 1., 2., 3.], dtype=dtype) tm.assert_index_equal(result, expected) def test_constructor_int_dtype_nan(self): # see gh-15187 data = [np.nan] msg = "cannot convert" with tm.assert_raises_regex(ValueError, msg): Index(data, dtype='int64') with tm.assert_raises_regex(ValueError, msg): Index(data, dtype='uint64') # This, however, should not break # because NaN is float. expected = Float64Index(data) result = Index(data, dtype='float') tm.assert_index_equal(result, expected) def test_index_ctor_infer_nan_nat(self): # GH 13467 exp = pd.Float64Index([np.nan, np.nan]) assert exp.dtype == np.float64 tm.assert_index_equal(Index([np.nan, np.nan]), exp) tm.assert_index_equal(Index(np.array([np.nan, np.nan])), exp) exp = pd.DatetimeIndex([pd.NaT, pd.NaT]) assert exp.dtype == 'datetime64[ns]' tm.assert_index_equal(Index([pd.NaT, pd.NaT]), exp) tm.assert_index_equal(Index(np.array([pd.NaT, pd.NaT])), exp) exp = pd.DatetimeIndex([pd.NaT, pd.NaT]) assert exp.dtype == 'datetime64[ns]' for data in [[pd.NaT, np.nan], [np.nan, pd.NaT], [np.nan, np.datetime64('nat')], [np.datetime64('nat'), np.nan]]: tm.assert_index_equal(Index(data), exp) tm.assert_index_equal(Index(np.array(data, dtype=object)), exp) exp = pd.TimedeltaIndex([pd.NaT, pd.NaT]) assert exp.dtype == 'timedelta64[ns]' for data in [[np.nan, np.timedelta64('nat')], [np.timedelta64('nat'), np.nan], [pd.NaT, np.timedelta64('nat')], [np.timedelta64('nat'), pd.NaT]]: tm.assert_index_equal(Index(data), exp) tm.assert_index_equal(Index(np.array(data, dtype=object)), exp) # mixed np.datetime64/timedelta64 nat results in object data = [np.datetime64('nat'), np.timedelta64('nat')] exp = pd.Index(data, dtype=object) tm.assert_index_equal(Index(data), exp) tm.assert_index_equal(Index(np.array(data, dtype=object)), exp) data = [np.timedelta64('nat'), np.datetime64('nat')] exp = pd.Index(data, dtype=object) tm.assert_index_equal(Index(data), exp) tm.assert_index_equal(Index(np.array(data, dtype=object)), exp) def test_index_ctor_infer_periodindex(self): xp = period_range('2012-1-1', freq='M', periods=3) rs = Index(xp) tm.assert_index_equal(rs, xp) assert isinstance(rs, PeriodIndex) def test_constructor_simple_new(self): idx = Index([1, 2, 3, 4, 5], name='int') result = idx._simple_new(idx, 'int') tm.assert_index_equal(result, idx) idx = Index([1.1, np.nan, 2.2, 3.0], name='float') result = idx._simple_new(idx, 'float') tm.assert_index_equal(result, idx) idx = Index(['A', 'B', 'C', np.nan], name='obj') result = idx._simple_new(idx, 'obj') tm.assert_index_equal(result, idx) def test_constructor_dtypes(self): for idx in [Index(np.array([1, 2, 3], dtype=int)), Index(np.array([1, 2, 3], dtype=int), dtype=int), Index([1, 2, 3], dtype=int)]: assert isinstance(idx, Int64Index) # These should coerce for idx in [Index(np.array([1., 2., 3.], dtype=float), dtype=int), Index([1., 2., 3.], dtype=int)]: assert isinstance(idx, Int64Index) for idx in [Index(np.array([1., 2., 3.], dtype=float)), Index(np.array([1, 2, 3], dtype=int), dtype=float), Index(np.array([1., 2., 3.], dtype=float), dtype=float), Index([1, 2, 3], dtype=float), Index([1., 2., 3.], dtype=float)]: assert isinstance(idx, Float64Index) for idx in [Index(np.array([True, False, True], 
dtype=bool)), Index([True, False, True]), Index(np.array([True, False, True], dtype=bool), dtype=bool), Index([True, False, True], dtype=bool)]: assert isinstance(idx, Index) assert idx.dtype == object for idx in [Index(np.array([1, 2, 3], dtype=int), dtype='category'), Index([1, 2, 3], dtype='category'), Index(np.array([np_datetime64_compat('2011-01-01'), np_datetime64_compat('2011-01-02')]), dtype='category'), Index([datetime(2011, 1, 1), datetime(2011, 1, 2)], dtype='category')]: assert isinstance(idx, CategoricalIndex) for idx in [Index(np.array([np_datetime64_compat('2011-01-01'), np_datetime64_compat('2011-01-02')])), Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])]: assert isinstance(idx, DatetimeIndex) for idx in [Index(np.array([np_datetime64_compat('2011-01-01'), np_datetime64_compat('2011-01-02')]), dtype=object), Index([datetime(2011, 1, 1), datetime(2011, 1, 2)], dtype=object)]: assert not isinstance(idx, DatetimeIndex) assert isinstance(idx, Index) assert idx.dtype == object for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64( 1, 'D')])), Index([timedelta(1), timedelta(1)])]: assert isinstance(idx, TimedeltaIndex) for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64(1, 'D')]), dtype=object), Index([timedelta(1), timedelta(1)], dtype=object)]: assert not isinstance(idx, TimedeltaIndex) assert isinstance(idx, Index) assert idx.dtype == object def test_constructor_dtypes_datetime(self): for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']: idx = pd.date_range('2011-01-01', periods=5, tz=tz) dtype = idx.dtype # pass values without timezone, as DatetimeIndex localizes it for values in [pd.date_range('2011-01-01', periods=5).values, pd.date_range('2011-01-01', periods=5).asi8]: for res in [pd.Index(values, tz=tz), pd.Index(values, dtype=dtype), pd.Index(list(values), tz=tz), pd.Index(list(values), dtype=dtype)]: tm.assert_index_equal(res, idx) # check compat with DatetimeIndex for res in [pd.DatetimeIndex(values, tz=tz), pd.DatetimeIndex(values, dtype=dtype), pd.DatetimeIndex(list(values), tz=tz), pd.DatetimeIndex(list(values), dtype=dtype)]: tm.assert_index_equal(res, idx) def test_constructor_dtypes_timedelta(self): idx = pd.timedelta_range('1 days', periods=5) dtype = idx.dtype for values in [idx.values, idx.asi8]: for res in [pd.Index(values, dtype=dtype), pd.Index(list(values), dtype=dtype)]: tm.assert_index_equal(res, idx) # check compat with TimedeltaIndex for res in [pd.TimedeltaIndex(values, dtype=dtype), pd.TimedeltaIndex(list(values), dtype=dtype)]: tm.assert_index_equal(res, idx) def test_view_with_args(self): restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex', 'empty'] for i in restricted: ind = self.indices[i] # with arguments pytest.raises(TypeError, lambda: ind.view('i8')) # these are ok for i in list(set(self.indices.keys()) - set(restricted)): ind = self.indices[i] # with arguments ind.view('i8') def test_astype(self): casted = self.intIndex.astype('i8') # it works! 
casted.get_loc(5) # pass on name self.intIndex.name = 'foobar' casted = self.intIndex.astype('i8') assert casted.name == 'foobar' def test_equals_object(self): # same assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])) # different length assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b'])) # same length, different values assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])) # Must also be an Index assert not Index(['a', 'b', 'c']).equals(['a', 'b', 'c']) def test_insert(self): # GH 7256 # validate neg/pos inserts result = Index(['b', 'c', 'd']) # test 0th element tm.assert_index_equal(Index(['a', 'b', 'c', 'd']), result.insert(0, 'a')) # test Nth element that follows Python list behavior tm.assert_index_equal(Index(['b', 'c', 'e', 'd']), result.insert(-1, 'e')) # test loc +/- neq (0, -1) tm.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z')) # test empty null_index = Index([]) tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a')) # GH 18295 (test missing) expected = Index(['a', np.nan, 'b', 'c']) for na in (np.nan, pd.NaT, None): result = Index(list('abc')).insert(1, na) tm.assert_index_equal(result, expected) def test_delete(self): idx = Index(['a', 'b', 'c', 'd'], name='idx') expected = Index(['b', 'c', 'd'], name='idx') result = idx.delete(0) tm.assert_index_equal(result, expected) assert result.name == expected.name expected = Index(['a', 'b', 'c'], name='idx') result = idx.delete(-1) tm.assert_index_equal(result, expected) assert result.name == expected.name with pytest.raises((IndexError, ValueError)): # either depending on numpy version result = idx.delete(5) def test_identical(self): # index i1 = Index(['a', 'b', 'c']) i2 = Index(['a', 'b', 'c']) assert i1.identical(i2) i1 = i1.rename('foo') assert i1.equals(i2) assert not i1.identical(i2) i2 = i2.rename('foo') assert i1.identical(i2) i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')]) i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False) assert not i3.identical(i4) def test_is_(self): ind = Index(range(10)) assert ind.is_(ind) assert ind.is_(ind.view().view().view().view()) assert not ind.is_(Index(range(10))) assert not ind.is_(ind.copy()) assert not ind.is_(ind.copy(deep=False)) assert not ind.is_(ind[:]) assert not ind.is_(ind.view(np.ndarray).view(Index)) assert not ind.is_(np.array(range(10))) # quasi-implementation dependent assert ind.is_(ind.view()) ind2 = ind.view() ind2.name = 'bob' assert ind.is_(ind2) assert ind2.is_(ind) # doesn't matter if Indices are *actually* views of underlying data, assert not ind.is_(Index(ind.values)) arr = np.array(range(1, 11)) ind1 = Index(arr, copy=False) ind2 = Index(arr, copy=False) assert not ind1.is_(ind2) def test_asof(self): d = self.dateIndex[0] assert self.dateIndex.asof(d) == d assert isna(self.dateIndex.asof(d - timedelta(1))) d = self.dateIndex[-1] assert self.dateIndex.asof(d + timedelta(1)) == d d = self.dateIndex[0].to_pydatetime() assert isinstance(self.dateIndex.asof(d), Timestamp) def test_asof_datetime_partial(self): idx = pd.date_range('2010-01-01', periods=2, freq='m') expected = Timestamp('2010-02-28') result = idx.asof('2010-02') assert result == expected assert not isinstance(result, Index) def test_nanosecond_index_access(self): s = Series([Timestamp('20130101')]).values.view('i8')[0] r = DatetimeIndex([s + 50 + i for i in range(100)]) x = Series(np.random.randn(100), index=r) first_value = x.asof(x.index[0]) # this does not yet work, as parsing strings is done via dateutil # assert first_value == 
x['2013-01-01 00:00:00.000000050+0000'] exp_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+0000', 'ns') assert first_value == x[Timestamp(exp_ts)] def test_comparators(self): index = self.dateIndex element = index[len(index) // 2] element = _to_m8(element) arr = np.array(index) def _check(op): arr_result = op(arr, element) index_result = op(index, element) assert isinstance(index_result, np.ndarray) tm.assert_numpy_array_equal(arr_result, index_result) _check(operator.eq) _check(operator.ne) _check(operator.gt) _check(operator.lt) _check(operator.ge) _check(operator.le) def test_booleanindex(self): boolIdx = np.repeat(True, len(self.strIndex)).astype(bool) boolIdx[5:30:2] = False subIndex = self.strIndex[boolIdx] for i, val in enumerate(subIndex): assert subIndex.get_loc(val) == i subIndex = self.strIndex[list(boolIdx)] for i, val in enumerate(subIndex): assert subIndex.get_loc(val) == i def test_fancy(self): sl = self.strIndex[[1, 2, 3]] for i in sl: assert i == sl[sl.get_loc(i)] def test_empty_fancy(self): empty_farr = np.array([], dtype=np.float_) empty_iarr = np.array([], dtype=np.int_) empty_barr = np.array([], dtype=np.bool_) # pd.DatetimeIndex is excluded, because it overrides getitem and should # be tested separately. for idx in [self.strIndex, self.intIndex, self.floatIndex]: empty_idx = idx.__class__([]) assert idx[[]].identical(empty_idx) assert idx[empty_iarr].identical(empty_idx) assert idx[empty_barr].identical(empty_idx) # np.ndarray only accepts ndarray of int & bool dtypes, so should # Index. pytest.raises(IndexError, idx.__getitem__, empty_farr) def test_getitem_error(self, indices): with pytest.raises(IndexError): indices[101] with pytest.raises(IndexError): indices['no_int'] def test_intersection(self): first = self.strIndex[:20] second = self.strIndex[:10] intersect = first.intersection(second) assert tm.equalContents(intersect, second) # Corner cases inter = first.intersection(first) assert inter is first idx1 = Index([1, 2, 3, 4, 5], name='idx') # if target has the same name, it is preserved idx2 = Index([3, 4, 5, 6, 7], name='idx') expected2 = Index([3, 4, 5], name='idx') result2 = idx1.intersection(idx2) tm.assert_index_equal(result2, expected2) assert result2.name == expected2.name # if target name is different, it will be reset idx3 = Index([3, 4, 5, 6, 7], name='other') expected3 = Index([3, 4, 5], name=None) result3 = idx1.intersection(idx3) tm.assert_index_equal(result3, expected3) assert result3.name == expected3.name # non monotonic idx1 = Index([5, 3, 2, 4, 1], name='idx') idx2 = Index([4, 7, 6, 5, 3], name='idx') expected = Index([5, 3, 4], name='idx') result = idx1.intersection(idx2) tm.assert_index_equal(result, expected) idx2 = Index([4, 7, 6, 5, 3], name='other') expected = Index([5, 3, 4], name=None) result = idx1.intersection(idx2) tm.assert_index_equal(result, expected) # non-monotonic non-unique idx1 = Index(['A', 'B', 'A', 'C']) idx2 = Index(['B', 'D']) expected = Index(['B'], dtype='object') result = idx1.intersection(idx2) tm.assert_index_equal(result, expected) idx2 = Index(['B', 'D', 'A']) expected = Index(['A', 'B', 'A'], dtype='object') result = idx1.intersection(idx2) tm.assert_index_equal(result, expected) # preserve names first = self.strIndex[5:20] second = self.strIndex[:10] first.name = 'A' second.name = 'A' intersect = first.intersection(second) assert intersect.name == 'A' second.name = 'B' intersect = first.intersection(second) assert intersect.name is None first.name = None second.name = 'B' intersect = 
first.intersection(second) assert intersect.name is None def test_intersect_str_dates(self): dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] i1 = Index(dt_dates, dtype=object) i2 = Index(['aa'], dtype=object) res = i2.intersection(i1) assert len(res) == 0 def test_union(self): first = self.strIndex[5:20] second = self.strIndex[:10] everything = self.strIndex[:20] union = first.union(second) assert tm.equalContents(union, everything) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: result = first.union(case) assert tm.equalContents(result, everything) # Corner cases union = first.union(first) assert union is first union = first.union([]) assert union is first union = Index([]).union(first) assert union is first # preserve names first = Index(list('ab'), name='A') second = Index(list('ab'), name='B') union = first.union(second) expected = Index(list('ab'), name=None) tm.assert_index_equal(union, expected) first = Index(list('ab'), name='A') second = Index([], name='B') union = first.union(second) expected = Index(list('ab'), name=None) tm.assert_index_equal(union, expected) first = Index([], name='A') second = Index(list('ab'), name='B') union = first.union(second) expected = Index(list('ab'), name=None) tm.assert_index_equal(union, expected) first = Index(list('ab')) second = Index(list('ab'), name='B') union = first.union(second) expected = Index(list('ab'), name='B') tm.assert_index_equal(union, expected) first = Index([]) second = Index(list('ab'), name='B') union = first.union(second) expected = Index(list('ab'), name='B') tm.assert_index_equal(union, expected) first = Index(list('ab')) second = Index([], name='B') union = first.union(second) expected = Index(list('ab'), name='B') tm.assert_index_equal(union, expected) first = Index(list('ab'), name='A') second = Index(list('ab')) union = first.union(second) expected = Index(list('ab'), name='A') tm.assert_index_equal(union, expected) first = Index(list('ab'), name='A') second = Index([]) union = first.union(second) expected = Index(list('ab'), name='A') tm.assert_index_equal(union, expected) first = Index([], name='A') second = Index(list('ab')) union = first.union(second) expected = Index(list('ab'), name='A') tm.assert_index_equal(union, expected) with tm.assert_produces_warning(RuntimeWarning): firstCat = self.strIndex.union(self.dateIndex) secondCat = self.strIndex.union(self.strIndex) if self.dateIndex.dtype == np.object_: appended = np.append(self.strIndex, self.dateIndex) else: appended = np.append(self.strIndex, self.dateIndex.astype('O')) assert tm.equalContents(firstCat, appended) assert tm.equalContents(secondCat, self.strIndex) tm.assert_contains_all(self.strIndex, firstCat) tm.assert_contains_all(self.strIndex, secondCat) tm.assert_contains_all(self.dateIndex, firstCat) def test_add(self): idx = self.strIndex expected = Index(self.strIndex.values * 2) tm.assert_index_equal(idx + idx, expected) tm.assert_index_equal(idx + idx.tolist(), expected) tm.assert_index_equal(idx.tolist() + idx, expected) # test add and radd idx = Index(list('abc')) expected = Index(['a1', 'b1', 'c1']) tm.assert_index_equal(idx + '1', expected) expected = Index(['1a', '1b', '1c']) tm.assert_index_equal('1' + idx, expected) def test_sub(self): idx = self.strIndex pytest.raises(TypeError, lambda: idx - 'a') pytest.raises(TypeError, lambda: idx - idx) pytest.raises(TypeError, lambda: idx - idx.tolist()) pytest.raises(TypeError, lambda: idx.tolist() - idx) def test_map_identity_mapping(self): # 
GH 12766 for name, cur_index in self.indices.items(): tm.assert_index_equal(cur_index, cur_index.map(lambda x: x)) def test_map_with_tuples(self): # GH 12766 # Test that returning a single tuple from an Index # returns an Index. boolean_index = tm.makeIntIndex(3).map(lambda x: (x,)) expected = Index([(0,), (1,), (2,)]) tm.assert_index_equal(boolean_index, expected) # Test that returning a tuple from a map of a single index # returns a MultiIndex object. boolean_index = tm.makeIntIndex(3).map(lambda x: (x, x == 1)) expected = MultiIndex.from_tuples([(0, False), (1, True), (2, False)]) tm.assert_index_equal(boolean_index, expected) # Test that returning a single object from a MultiIndex # returns an Index. first_level = ['foo', 'bar', 'baz'] multi_index = MultiIndex.from_tuples(lzip(first_level, [1, 2, 3])) reduced_index = multi_index.map(lambda x: x[0]) tm.assert_index_equal(reduced_index, Index(first_level)) def test_map_tseries_indices_return_index(self): date_index = tm.makeDateIndex(10) exp = Index([1] * 10) tm.assert_index_equal(exp, date_index.map(lambda x: 1)) period_index = tm.makePeriodIndex(10) tm.assert_index_equal(exp, period_index.map(lambda x: 1)) tdelta_index = tm.makeTimedeltaIndex(10) tm.assert_index_equal(exp, tdelta_index.map(lambda x: 1)) date_index = tm.makeDateIndex(24, freq='h', name='hourly') exp = Index(range(24), name='hourly') tm.assert_index_equal(exp, date_index.map(lambda x: x.hour)) @pytest.mark.parametrize( "mapper", [ lambda values, index: {i: e for e, i in zip(values, index)}, lambda values, index: pd.Series(values, index)]) def test_map_dictlike(self, mapper): # GH 12756 expected = Index(['foo', 'bar', 'baz']) result = tm.makeIntIndex(3).map(mapper(expected.values, [0, 1, 2])) tm.assert_index_equal(result, expected) for name in self.indices.keys(): if name == 'catIndex': # Tested in test_categorical continue elif name == 'repeats': # Cannot map duplicated index continue index = self.indices[name] expected = Index(np.arange(len(index), 0, -1)) # to match proper result coercion for uints if name == 'empty': expected = Index([]) result = index.map(mapper(expected, index)) tm.assert_index_equal(result, expected) def test_map_with_non_function_missing_values(self): # GH 12756 expected = Index([2., np.nan, 'foo']) input = Index([2, 1, 0]) mapper = Series(['foo', 2., 'baz'], index=[0, 2, -1]) tm.assert_index_equal(expected, input.map(mapper)) mapper = {0: 'foo', 2: 2.0, -1: 'baz'} tm.assert_index_equal(expected, input.map(mapper)) def test_map_na_exclusion(self): idx = Index([1.5, np.nan, 3, np.nan, 5]) result = idx.map(lambda x: x * 2, na_action='ignore') exp = idx * 2 tm.assert_index_equal(result, exp) def test_map_defaultdict(self): idx = Index([1, 2, 3]) default_dict = defaultdict(lambda: 'blank') default_dict[1] = 'stuff' result = idx.map(default_dict) expected = Index(['stuff', 'blank', 'blank']) tm.assert_index_equal(result, expected) def test_append_multiple(self): index = Index(['a', 'b', 'c', 'd', 'e', 'f']) foos = [index[:2], index[2:4], index[4:]] result = foos[0].append(foos[1:]) tm.assert_index_equal(result, index) # empty result = index.append([]) tm.assert_index_equal(result, index) def test_append_empty_preserve_name(self): left = Index([], name='foo') right = Index([1, 2, 3], name='foo') result = left.append(right) assert result.name == 'foo' left = Index([], name='foo') right = Index([1, 2, 3], name='bar') result = left.append(right) assert result.name is None def test_add_string(self): # from bug report index = Index(['a', 'b', 'c']) index2 = 
index + 'foo' assert 'a' not in index2 assert 'afoo' in index2 def test_iadd_string(self): index = pd.Index(['a', 'b', 'c']) # doesn't fail test unless there is a check before `+=` assert 'a' in index index += '_x' assert 'a_x' in index def test_difference(self): first = self.strIndex[5:20] second = self.strIndex[:10] answer = self.strIndex[10:20] first.name = 'name' # different names result = first.difference(second) assert tm.equalContents(result, answer) assert result.name is None # same names second.name = 'name' result = first.difference(second) assert result.name == 'name' # with empty result = first.difference([]) assert tm.equalContents(result, first) assert result.name == first.name # with everything result = first.difference(first) assert len(result) == 0 assert result.name == first.name def test_symmetric_difference(self): # smoke idx1 = Index([1, 2, 3, 4], name='idx1') idx2 = Index([2, 3, 4, 5]) result = idx1.symmetric_difference(idx2) expected = Index([1, 5]) assert tm.equalContents(result, expected) assert result.name is None # __xor__ syntax expected = idx1 ^ idx2 assert tm.equalContents(result, expected) assert result.name is None # multiIndex idx1 = MultiIndex.from_tuples(self.tuples) idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)]) result = idx1.symmetric_difference(idx2) expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)]) assert tm.equalContents(result, expected) # nans: # GH 13514 change: {nan} - {nan} == {} # (GH 6444, sorting of nans, is no longer an issue) idx1 = Index([1, np.nan, 2, 3]) idx2 = Index([0, 1, np.nan]) idx3 = Index([0, 1]) result = idx1.symmetric_difference(idx2) expected = Index([0.0, 2.0, 3.0]) tm.assert_index_equal(result, expected) result = idx1.symmetric_difference(idx3) expected = Index([0.0, 2.0, 3.0, np.nan]) tm.assert_index_equal(result, expected) # other not an Index: idx1 = Index([1, 2, 3, 4], name='idx1') idx2 = np.array([2, 3, 4, 5]) expected = Index([1, 5]) result = idx1.symmetric_difference(idx2) assert tm.equalContents(result, expected) assert result.name == 'idx1' result = idx1.symmetric_difference(idx2, result_name='new_name') assert tm.equalContents(result, expected) assert result.name == 'new_name' def test_is_numeric(self): assert not self.dateIndex.is_numeric() assert not self.strIndex.is_numeric() assert self.intIndex.is_numeric() assert self.floatIndex.is_numeric() assert not self.catIndex.is_numeric() def test_is_object(self): assert self.strIndex.is_object() assert self.boolIndex.is_object() assert not self.catIndex.is_object() assert not self.intIndex.is_object() assert not self.dateIndex.is_object() assert not self.floatIndex.is_object() def test_is_all_dates(self): assert self.dateIndex.is_all_dates assert not self.strIndex.is_all_dates assert not self.intIndex.is_all_dates def test_summary(self): self._check_method_works(Index.summary) # GH3869 ind = Index(['{other}%s', "~:{range}:0"], name='A') result = ind.summary() # shouldn't be formatted accidentally. 
assert '~:{range}:0' in result assert '{other}%s' in result def test_format(self): self._check_method_works(Index.format) # GH 14626 # windows has different precision on datetime.datetime.now (it doesn't # include us since the default for Timestamp shows these but Index # formatting does not we are skipping) now = datetime.now() if not str(now).endswith("000"): index = Index([now]) formatted = index.format() expected = [str(index[0])] assert formatted == expected # 2845 index = Index([1, 2.0 + 3.0j, np.nan]) formatted = index.format() expected = [str(index[0]), str(index[1]), u('NaN')] assert formatted == expected # is this really allowed? index = Index([1, 2.0 + 3.0j, None]) formatted = index.format() expected = [str(index[0]), str(index[1]), u('NaN')] assert formatted == expected self.strIndex[:0].format() def test_format_with_name_time_info(self): # bug I fixed 12/20/2011 inc = timedelta(hours=4) dates = Index([dt + inc for dt in self.dateIndex], name='something') formatted = dates.format(name=True) assert formatted[0] == 'something' def test_format_datetime_with_time(self): t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)]) result = t.format() expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00'] assert len(result) == 2 assert result == expected def test_format_none(self): values = ['a', 'b', 'c', None] idx = Index(values) idx.format() assert idx[3] is None def test_logical_compat(self): idx = self.create_index() assert idx.all() == idx.values.all() assert idx.any() == idx.values.any() def _check_method_works(self, method): method(self.empty) method(self.dateIndex) method(self.unicodeIndex) method(self.strIndex) method(self.intIndex) method(self.tuples) method(self.catIndex) def test_get_indexer(self): idx1 = Index([1, 2, 3, 4, 5]) idx2 = Index([2, 4, 6]) r1 = idx1.get_indexer(idx2) assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp)) r1 = idx2.get_indexer(idx1, method='pad') e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp) assert_almost_equal(r1, e1) r2 = idx2.get_indexer(idx1[::-1], method='pad') assert_almost_equal(r2, e1[::-1]) rffill1 = idx2.get_indexer(idx1, method='ffill') assert_almost_equal(r1, rffill1) r1 = idx2.get_indexer(idx1, method='backfill') e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp) assert_almost_equal(r1, e1) rbfill1 = idx2.get_indexer(idx1, method='bfill') assert_almost_equal(r1, rbfill1) r2 = idx2.get_indexer(idx1[::-1], method='backfill') assert_almost_equal(r2, e1[::-1]) def test_get_indexer_invalid(self): # GH10411 idx = Index(np.arange(10)) with tm.assert_raises_regex(ValueError, 'tolerance argument'): idx.get_indexer([1, 0], tolerance=1) with tm.assert_raises_regex(ValueError, 'limit argument'): idx.get_indexer([1, 0], limit=1) @pytest.mark.parametrize( 'method, tolerance, indexer, expected', [ ('pad', None, [0, 5, 9], [0, 5, 9]), ('backfill', None, [0, 5, 9], [0, 5, 9]), ('nearest', None, [0, 5, 9], [0, 5, 9]), ('pad', 0, [0, 5, 9], [0, 5, 9]), ('backfill', 0, [0, 5, 9], [0, 5, 9]), ('nearest', 0, [0, 5, 9], [0, 5, 9]), ('pad', None, [0.2, 1.8, 8.5], [0, 1, 8]), ('backfill', None, [0.2, 1.8, 8.5], [1, 2, 9]), ('nearest', None, [0.2, 1.8, 8.5], [0, 2, 9]), ('pad', 1, [0.2, 1.8, 8.5], [0, 1, 8]), ('backfill', 1, [0.2, 1.8, 8.5], [1, 2, 9]), ('nearest', 1, [0.2, 1.8, 8.5], [0, 2, 9]), ('pad', 0.2, [0.2, 1.8, 8.5], [0, -1, -1]), ('backfill', 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]), ('nearest', 0.2, [0.2, 1.8, 8.5], [0, 2, -1])]) def test_get_indexer_nearest(self, method, tolerance, indexer, expected): idx = Index(np.arange(10)) actual = 
idx.get_indexer(indexer, method=method, tolerance=tolerance) tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) @pytest.mark.parametrize('listtype', [list, tuple, Series, np.array]) @pytest.mark.parametrize( 'tolerance, expected', list(zip([[0.3, 0.3, 0.1], [0.2, 0.1, 0.1], [0.1, 0.5, 0.5]], [[0, 2, -1], [0, -1, -1], [-1, 2, 9]]))) def test_get_indexer_nearest_listlike_tolerance(self, tolerance, expected, listtype): idx = Index(np.arange(10)) actual = idx.get_indexer([0.2, 1.8, 8.5], method='nearest', tolerance=listtype(tolerance)) tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) def test_get_indexer_nearest_error(self): idx = Index(np.arange(10)) with tm.assert_raises_regex(ValueError, 'limit argument'): idx.get_indexer([1, 0], method='nearest', limit=1) with pytest.raises(ValueError, match='tolerance size must match'): idx.get_indexer([1, 0], method='nearest', tolerance=[1, 2, 3]) def test_get_indexer_nearest_decreasing(self): idx = Index(np.arange(10))[::-1] all_methods = ['pad', 'backfill', 'nearest'] for method in all_methods: actual = idx.get_indexer([0, 5, 9], method=method) tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp)) for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1], [9, 7, 0]]): actual = idx.get_indexer([0.2, 1.8, 8.5], method=method) tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp)) def test_get_indexer_strings(self): idx = pd.Index(['b', 'c']) actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad') expected = np.array([-1, 0, 1, 1], dtype=np.intp) tm.assert_numpy_array_equal(actual, expected) actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill') expected = np.array([0, 0, 1, -1], dtype=np.intp) tm.assert_numpy_array_equal(actual, expected) with pytest.raises(TypeError): idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest') with pytest.raises(TypeError): idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2) with pytest.raises(TypeError): idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=[2, 2, 2, 2]) def test_get_indexer_numeric_index_boolean_target(self): # GH 16877 numeric_idx = pd.Index(range(4)) result = numeric_idx.get_indexer([True, False, True]) expected = np.array([-1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) def test_get_loc(self): idx = pd.Index([0, 1, 2]) all_methods = [None, 'pad', 'backfill', 'nearest'] for method in all_methods: assert idx.get_loc(1, method=method) == 1 if method is not None: assert idx.get_loc(1, method=method, tolerance=0) == 1 with pytest.raises(TypeError): idx.get_loc([1, 2], method=method) for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: assert idx.get_loc(1.1, method) == loc for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]: assert idx.get_loc(1.1, method, tolerance=1) == loc for method in ['pad', 'backfill', 'nearest']: with pytest.raises(KeyError): idx.get_loc(1.1, method, tolerance=0.05) with tm.assert_raises_regex(ValueError, 'must be numeric'): idx.get_loc(1.1, 'nearest', tolerance='invalid') with tm.assert_raises_regex(ValueError, 'tolerance .* valid if'): idx.get_loc(1.1, tolerance=1) with pytest.raises(ValueError, match='tolerance size must match'): idx.get_loc(1.1, 'nearest', tolerance=[1, 1]) idx = pd.Index(['a', 'c']) with pytest.raises(TypeError): idx.get_loc('a', method='nearest') with pytest.raises(TypeError): idx.get_loc('a', method='pad', tolerance='invalid') def test_slice_locs(self): for dtype in [int, float]: idx = 
Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) n = len(idx) assert idx.slice_locs(start=2) == (2, n) assert idx.slice_locs(start=3) == (3, n) assert idx.slice_locs(3, 8) == (3, 6) assert idx.slice_locs(5, 10) == (3, n) assert idx.slice_locs(end=8) == (0, 6) assert idx.slice_locs(end=9) == (0, 7) # reversed idx2 = idx[::-1] assert idx2.slice_locs(8, 2) == (2, 6) assert idx2.slice_locs(7, 3) == (2, 5) # float slicing idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float)) n = len(idx) assert idx.slice_locs(5.0, 10.0) == (3, n) assert idx.slice_locs(4.5, 10.5) == (3, 8) idx2 = idx[::-1] assert idx2.slice_locs(8.5, 1.5) == (2, 6) assert idx2.slice_locs(10.5, -1) == (0, n) # int slicing with floats # GH 4892, these are all TypeErrors idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int)) pytest.raises(TypeError, lambda: idx.slice_locs(5.0, 10.0), (3, n)) pytest.raises(TypeError, lambda: idx.slice_locs(4.5, 10.5), (3, 8)) idx2 = idx[::-1] pytest.raises(TypeError, lambda: idx2.slice_locs(8.5, 1.5), (2, 6)) pytest.raises(TypeError, lambda: idx2.slice_locs(10.5, -1), (0, n)) def test_slice_locs_dup(self): idx = Index(['a', 'a', 'b', 'c', 'd', 'd']) assert idx.slice_locs('a', 'd') == (0, 6) assert idx.slice_locs(end='d') == (0, 6) assert idx.slice_locs('a', 'c') == (0, 4) assert idx.slice_locs('b', 'd') == (2, 6) idx2 = idx[::-1] assert idx2.slice_locs('d', 'a') == (0, 6) assert idx2.slice_locs(end='a') == (0, 6) assert idx2.slice_locs('d', 'b') == (0, 4) assert idx2.slice_locs('c', 'a') == (2, 6) for dtype in [int, float]: idx = Index(np.array([10, 12, 12, 14], dtype=dtype)) assert idx.slice_locs(12, 12) == (1, 3) assert idx.slice_locs(11, 13) == (1, 3) idx2 = idx[::-1] assert idx2.slice_locs(12, 12) == (1, 3) assert idx2.slice_locs(13, 11) == (1, 3) def test_slice_locs_na(self): idx = Index([np.nan, 1, 2]) pytest.raises(KeyError, idx.slice_locs, start=1.5) pytest.raises(KeyError, idx.slice_locs, end=1.5) assert idx.slice_locs(1) == (1, 3) assert idx.slice_locs(np.nan) == (0, 3) idx = Index([0, np.nan, np.nan, 1, 2]) assert idx.slice_locs(np.nan) == (1, 5) def test_slice_locs_negative_step(self): idx = Index(list('bcdxy')) SLC = pd.IndexSlice def check_slice(in_slice, expected): s_start, s_stop = idx.slice_locs(in_slice.start, in_slice.stop, in_slice.step) result = idx[s_start:s_stop:in_slice.step] expected = pd.Index(list(expected)) tm.assert_index_equal(result, expected) for in_slice, expected in [ (SLC[::-1], 'yxdcb'), (SLC['b':'y':-1], ''), (SLC['b'::-1], 'b'), (SLC[:'b':-1], 'yxdcb'), (SLC[:'y':-1], 'y'), (SLC['y'::-1], 'yxdcb'), (SLC['y'::-4], 'yb'), # absent labels (SLC[:'a':-1], 'yxdcb'), (SLC[:'a':-2], 'ydb'), (SLC['z'::-1], 'yxdcb'), (SLC['z'::-3], 'yc'), (SLC['m'::-1], 'dcb'), (SLC[:'m':-1], 'yx'), (SLC['a':'a':-1], ''), (SLC['z':'z':-1], ''), (SLC['m':'m':-1], '') ]: check_slice(in_slice, expected) def test_drop(self): n = len(self.strIndex) drop = self.strIndex[lrange(5, 10)] dropped = self.strIndex.drop(drop) expected = self.strIndex[lrange(5) + lrange(10, n)] tm.assert_index_equal(dropped, expected) pytest.raises(ValueError, self.strIndex.drop, ['foo', 'bar']) pytest.raises(ValueError, self.strIndex.drop, ['1', 'bar']) # errors='ignore' mixed = drop.tolist() + ['foo'] dropped = self.strIndex.drop(mixed, errors='ignore') expected = self.strIndex[lrange(5) + lrange(10, n)] tm.assert_index_equal(dropped, expected) dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore') expected = self.strIndex[lrange(n)] tm.assert_index_equal(dropped, expected) dropped = 
self.strIndex.drop(self.strIndex[0]) expected = self.strIndex[1:] tm.assert_index_equal(dropped, expected) ser = Index([1, 2, 3]) dropped = ser.drop(1) expected = Index([2, 3]) tm.assert_index_equal(dropped, expected) # errors='ignore' pytest.raises(ValueError, ser.drop, [3, 4]) dropped = ser.drop(4, errors='ignore') expected = Index([1, 2, 3]) tm.assert_index_equal(dropped, expected) dropped = ser.drop([3, 4, 5], errors='ignore') expected = Index([1, 2]) tm.assert_index_equal(dropped, expected) @pytest.mark.parametrize("values", [['a', 'b', ('c', 'd')], ['a', ('c', 'd'), 'b'], [('c', 'd'), 'a', 'b']]) @pytest.mark.parametrize("to_drop", [[('c', 'd'), 'a'], ['a', ('c', 'd')]]) def test_drop_tuple(self, values, to_drop): # GH 18304 index = pd.Index(values) expected = pd.Index(['b']) result = index.drop(to_drop) tm.assert_index_equal(result, expected) removed = index.drop(to_drop[0]) for drop_me in to_drop[1], [to_drop[1]]: result = removed.drop(drop_me) tm.assert_index_equal(result, expected) removed = index.drop(to_drop[1]) for drop_me in to_drop[1], [to_drop[1]]: pytest.raises(ValueError, removed.drop, drop_me) def test_tuple_union_bug(self): import pandas import numpy as np aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')], dtype=[('num', int), ('let', 'a1')]) aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2, 'C')], dtype=[('num', int), ('let', 'a1')]) idx1 = pandas.Index(aidx1) idx2 = pandas.Index(aidx2) # intersection broken? int_idx = idx1.intersection(idx2) # needs to be 1d like idx1 and idx2 expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2))) assert int_idx.ndim == 1 tm.assert_index_equal(int_idx, expected) # union broken union_idx = idx1.union(idx2) expected = idx2 assert union_idx.ndim == 1 tm.assert_index_equal(union_idx, expected) def test_is_monotonic_incomparable(self): index = Index([5, datetime.now(), 7]) assert not index.is_monotonic_increasing assert not index.is_monotonic_decreasing assert not index._is_strictly_monotonic_increasing assert not index._is_strictly_monotonic_decreasing def test_get_set_value(self): values = np.random.randn(100) date = self.dateIndex[67] assert_almost_equal(self.dateIndex.get_value(values, date), values[67]) self.dateIndex.set_value(values, date, 10) assert values[67] == 10 def test_isin(self): values = ['foo', 'bar', 'quux'] idx = Index(['qux', 'baz', 'foo', 'bar']) result = idx.isin(values) expected = np.array([False, False, True, True]) tm.assert_numpy_array_equal(result, expected) # set result = idx.isin(set(values)) tm.assert_numpy_array_equal(result, expected) # empty, return dtype bool idx = Index([]) result = idx.isin(values) assert len(result) == 0 assert result.dtype == np.bool_ @pytest.mark.skipif(PYPY, reason="np.nan is float('nan') on PyPy") def test_isin_nan_not_pypy(self): tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]), np.array([False, False])) @pytest.mark.skipif(not PYPY, reason="np.nan is float('nan') on PyPy") def test_isin_nan_pypy(self): tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]), np.array([False, True])) def test_isin_nan_common(self): tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([np.nan]), np.array([False, True])) tm.assert_numpy_array_equal(Index(['a', pd.NaT]).isin([pd.NaT]), np.array([False, True])) tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([pd.NaT]), np.array([False, False])) # Float64Index overrides isin, so must be checked separately tm.assert_numpy_array_equal(Float64Index([1.0, 
np.nan]).isin([np.nan]), np.array([False, True])) tm.assert_numpy_array_equal( Float64Index([1.0, np.nan]).isin([float('nan')]), np.array([False, True])) # we cannot compare NaT with NaN tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([pd.NaT]), np.array([False, False])) def test_isin_level_kwarg(self): def check_idx(idx): values = idx.tolist()[-2:] + ['nonexisting'] expected = np.array([False, False, True, True]) tm.assert_numpy_array_equal(expected, idx.isin(values, level=0)) tm.assert_numpy_array_equal(expected, idx.isin(values, level=-1)) pytest.raises(IndexError, idx.isin, values, level=1) pytest.raises(IndexError, idx.isin, values, level=10) pytest.raises(IndexError, idx.isin, values, level=-2) pytest.raises(KeyError, idx.isin, values, level=1.0) pytest.raises(KeyError, idx.isin, values, level='foobar') idx.name = 'foobar' tm.assert_numpy_array_equal(expected, idx.isin(values, level='foobar')) pytest.raises(KeyError, idx.isin, values, level='xyzzy') pytest.raises(KeyError, idx.isin, values, level=np.nan) check_idx(Index(['qux', 'baz', 'foo', 'bar'])) # Float64Index overrides isin, so must be checked separately check_idx(Float64Index([1.0, 2.0, 3.0, 4.0])) @pytest.mark.parametrize("empty", [[], Series(), np.array([])]) def test_isin_empty(self, empty): # see gh-16991 idx = Index(["a", "b"]) expected = np.array([False, False]) result = idx.isin(empty) tm.assert_numpy_array_equal(expected, result) def test_boolean_cmp(self): values = [1, 2, 3, 4] idx = Index(values) res = (idx == values) tm.assert_numpy_array_equal(res, np.array( [True, True, True, True], dtype=bool)) def test_get_level_values(self): result = self.strIndex.get_level_values(0) tm.assert_index_equal(result, self.strIndex) # test for name (GH 17414) index_with_name = self.strIndex.copy() index_with_name.name = 'a' result = index_with_name.get_level_values('a') tm.assert_index_equal(result, index_with_name) def test_slice_keep_name(self): idx = Index(['a', 'b'], name='asdf') assert idx.name == idx[1:].name def test_join_self(self): # instance attributes of the form self.<name>Index indices = 'unicode', 'str', 'date', 'int', 'float' kinds = 'outer', 'inner', 'left', 'right' for index_kind in indices: res = getattr(self, '{0}Index'.format(index_kind)) for kind in kinds: joined = res.join(res, how=kind) assert res is joined def test_str_attribute(self): # GH9068 methods = ['strip', 'rstrip', 'lstrip'] idx = Index([' jack', 'jill ', ' jesse ', 'frank']) for method in methods: expected = Index([getattr(str, method)(x) for x in idx.values]) tm.assert_index_equal( getattr(Index.str, method)(idx.str), expected) # create a few instances that are not able to use .str accessor indices = [Index(range(5)), tm.makeDateIndex(10), MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]), PeriodIndex(start='2000', end='2010', freq='A')] for idx in indices: with tm.assert_raises_regex(AttributeError, 'only use .str accessor'): idx.str.repeat(2) idx = Index(['a b c', 'd e', 'f']) expected = Index([['a', 'b', 'c'], ['d', 'e'], ['f']]) tm.assert_index_equal(idx.str.split(), expected) tm.assert_index_equal(idx.str.split(expand=False), expected) expected = MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan), ('f', np.nan, np.nan)]) tm.assert_index_equal(idx.str.split(expand=True), expected) # test boolean case, should return np.array instead of boolean Index idx = Index(['a1', 'a2', 'b1', 'b2']) expected = np.array([True, True, False, False]) tm.assert_numpy_array_equal(idx.str.startswith('a'), expected) assert 
isinstance(idx.str.startswith('a'), np.ndarray) s = Series(range(4), index=idx) expected = Series(range(2), index=['a1', 'a2']) tm.assert_series_equal(s[s.index.str.startswith('a')], expected) def test_tab_completion(self): # GH 9910 idx = Index(list('abcd')) assert 'str' in dir(idx) idx = Index(range(4)) assert 'str' not in dir(idx) def test_indexing_doesnt_change_class(self): idx = Index([1, 2, 3, 'a', 'b', 'c']) assert idx[1:3].identical(pd.Index([2, 3], dtype=np.object_)) assert idx[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_)) def test_outer_join_sort(self): left_idx = Index(np.random.permutation(15)) right_idx = tm.makeDateIndex(10) with tm.assert_produces_warning(RuntimeWarning): joined = left_idx.join(right_idx, how='outer') # right_idx in this case because DatetimeIndex has join precedence over # Int64Index with tm.assert_produces_warning(RuntimeWarning): expected = right_idx.astype(object).union(left_idx.astype(object)) tm.assert_index_equal(joined, expected) def test_nan_first_take_datetime(self): idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')]) res = idx.take([-1, 0, 1]) exp = Index([idx[-1], idx[0], idx[1]]) tm.assert_index_equal(res, exp) def test_take_fill_value(self): # GH 12631 idx = pd.Index(list('ABC'), name='xxx') result = idx.take(np.array([1, 0, -1])) expected = pd.Index(list('BAC'), name='xxx') tm.assert_index_equal(result, expected) # fill_value result = idx.take(np.array([1, 0, -1]), fill_value=True) expected = pd.Index(['B', 'A', np.nan], name='xxx') tm.assert_index_equal(result, expected) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = pd.Index(['B', 'A', 'C'], name='xxx') tm.assert_index_equal(result, expected) msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') with tm.assert_raises_regex(ValueError, msg): idx.take(np.array([1, 0, -2]), fill_value=True) with tm.assert_raises_regex(ValueError, msg): idx.take(np.array([1, 0, -5]), fill_value=True) with pytest.raises(IndexError): idx.take(np.array([1, -5])) def test_reindex_preserves_name_if_target_is_list_or_ndarray(self): # GH6552 idx = pd.Index([0, 1, 2]) dt_idx = pd.date_range('20130101', periods=3) idx.name = None assert idx.reindex([])[0].name is None assert idx.reindex(np.array([]))[0].name is None assert idx.reindex(idx.tolist())[0].name is None assert idx.reindex(idx.tolist()[:-1])[0].name is None assert idx.reindex(idx.values)[0].name is None assert idx.reindex(idx.values[:-1])[0].name is None # Must preserve name even if dtype changes. assert idx.reindex(dt_idx.values)[0].name is None assert idx.reindex(dt_idx.tolist())[0].name is None idx.name = 'foobar' assert idx.reindex([])[0].name == 'foobar' assert idx.reindex(np.array([]))[0].name == 'foobar' assert idx.reindex(idx.tolist())[0].name == 'foobar' assert idx.reindex(idx.tolist()[:-1])[0].name == 'foobar' assert idx.reindex(idx.values)[0].name == 'foobar' assert idx.reindex(idx.values[:-1])[0].name == 'foobar' # Must preserve name even if dtype changes. 
assert idx.reindex(dt_idx.values)[0].name == 'foobar' assert idx.reindex(dt_idx.tolist())[0].name == 'foobar' def test_reindex_preserves_type_if_target_is_empty_list_or_array(self): # GH7774 idx = pd.Index(list('abc')) def get_reindex_type(target): return idx.reindex(target)[0].dtype.type assert get_reindex_type([]) == np.object_ assert get_reindex_type(np.array([])) == np.object_ assert get_reindex_type(np.array([], dtype=np.int64)) == np.object_ def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self): # GH7774 idx = pd.Index(list('abc')) def get_reindex_type(target): return idx.reindex(target)[0].dtype.type assert get_reindex_type(pd.Int64Index([])) == np.int64 assert get_reindex_type(pd.Float64Index([])) == np.float64 assert get_reindex_type(pd.DatetimeIndex([])) == np.datetime64 reindexed = idx.reindex(pd.MultiIndex( [pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0] assert reindexed.levels[0].dtype.type == np.int64 assert reindexed.levels[1].dtype.type == np.float64 def test_groupby(self): idx = Index(range(5)) groups = idx.groupby(np.array([1, 1, 2, 2, 2])) exp = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])} tm.assert_dict_equal(groups, exp) def test_equals_op_multiindex(self): # GH9785 # test comparisons of multiindex from pandas.compat import StringIO df = pd.read_csv(StringIO('a,b,c\n1,2,3\n4,5,6'), index_col=[0, 1]) tm.assert_numpy_array_equal(df.index == df.index, np.array([True, True])) mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)]) tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True])) mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)]) tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False])) mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]) with tm.assert_raises_regex(ValueError, "Lengths must match"): df.index == mi3 index_a = Index(['foo', 'bar', 'baz']) with tm.assert_raises_regex(ValueError, "Lengths must match"): df.index == index_a tm.assert_numpy_array_equal(index_a == mi3, np.array([False, False, False])) def test_conversion_preserves_name(self): # GH 10875 i = pd.Index(['01:02:03', '01:02:04'], name='label') assert i.name == pd.to_datetime(i).name assert i.name == pd.to_timedelta(i).name def test_string_index_repr(self): # py3/py2 repr can differ because of "u" prefix # which also affects to displayed element size if PY3: coerce = lambda x: x else: coerce = unicode # noqa # short idx = pd.Index(['a', 'bb', 'ccc']) if PY3: expected = u"""Index(['a', 'bb', 'ccc'], dtype='object')""" assert repr(idx) == expected else: expected = u"""Index([u'a', u'bb', u'ccc'], dtype='object')""" assert coerce(idx) == expected # multiple lines idx = pd.Index(['a', 'bb', 'ccc'] * 10) if PY3: expected = u"""\ Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], dtype='object')""" assert repr(idx) == expected else: expected = u"""\ Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], dtype='object')""" assert coerce(idx) == expected # truncated idx = pd.Index(['a', 'bb', 'ccc'] * 100) if PY3: expected = u"""\ Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', ... 
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], dtype='object', length=300)""" assert repr(idx) == expected else: expected = u"""\ Index([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', ... u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'], dtype='object', length=300)""" assert coerce(idx) == expected # short idx = pd.Index([u'あ', u'いい', u'ううう']) if PY3: expected = u"""Index(['あ', 'いい', 'ううう'], dtype='object')""" assert repr(idx) == expected else: expected = u"""Index([u'あ', u'いい', u'ううう'], dtype='object')""" assert coerce(idx) == expected # multiple lines idx = pd.Index([u'あ', u'いい', u'ううう'] * 10) if PY3: expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" u" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " u"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n" u" 'あ', 'いい', 'ううう', 'あ', 'いい', " u"'ううう'],\n" u" dtype='object')") assert repr(idx) == expected else: expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" u" u'いい', u'ううう', u'あ', u'いい', u'ううう', " u"u'あ', u'いい', u'ううう', u'あ', u'いい',\n" u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" u" dtype='object')") assert coerce(idx) == expected # truncated idx = pd.Index([u'あ', u'いい', u'ううう'] * 100) if PY3: expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', " u"'あ', 'いい', 'ううう', 'あ',\n" u" ...\n" u" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', " u"'ううう', 'あ', 'いい', 'ううう'],\n" u" dtype='object', length=300)") assert repr(idx) == expected else: expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " u"u'ううう', u'あ', u'いい', u'ううう', u'あ',\n" u" ...\n" u" u'ううう', u'あ', u'いい', u'ううう', u'あ', " u"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\n" u" dtype='object', length=300)") assert coerce(idx) == expected # Emable Unicode option ----------------------------------------- with cf.option_context('display.unicode.east_asian_width', True): # short idx = pd.Index([u'あ', u'いい', u'ううう']) if PY3: expected = (u"Index(['あ', 'いい', 'ううう'], " u"dtype='object')") assert repr(idx) == expected else: expected = (u"Index([u'あ', u'いい', u'ううう'], " u"dtype='object')") assert coerce(idx) == expected # multiple lines idx = pd.Index([u'あ', u'いい', u'ううう'] * 10) if PY3: expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " u"'ううう', 'あ', 'いい', 'ううう',\n" u" 'あ', 'いい', 'ううう', 'あ', 'いい', " u"'ううう', 'あ', 'いい', 'ううう',\n" u" 'あ', 'いい', 'ううう', 'あ', 'いい', " u"'ううう', 'あ', 'いい', 'ううう',\n" u" 'あ', 'いい', 'ううう'],\n" u" dtype='object')""") assert repr(idx) == expected else: expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " u"u'ううう', u'あ', u'いい',\n" u" u'ううう', u'あ', u'いい', u'ううう', " u"u'あ', u'いい', u'ううう', u'あ',\n" u" u'いい', u'ううう', u'あ', u'いい', " u"u'ううう', u'あ', u'いい',\n" u" u'ううう', u'あ', u'いい', u'ううう', " u"u'あ', u'いい', u'ううう'],\n" u" dtype='object')") assert coerce(idx) == expected # truncated idx = pd.Index([u'あ', u'いい', u'ううう'] * 100) if PY3: expected = (u"Index(['あ', 'いい', 'ううう', 'あ', 'いい', " u"'ううう', 'あ', 'いい', 'ううう',\n" u" 'あ',\n" u" ...\n" u" 'ううう', 'あ', 'いい', 'ううう', 'あ', " u"'いい', 'ううう', 'あ', 'いい',\n" u" 'ううう'],\n" u" dtype='object', length=300)") assert repr(idx) == expected else: expected = (u"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', " u"u'ううう', u'あ', u'いい',\n" u" u'ううう', u'あ',\n" u" ...\n" u" u'ううう', u'あ', u'いい', u'ううう', " u"u'あ', u'いい', u'ううう', u'あ',\n" u" u'いい', u'ううう'],\n" u" dtype='object', length=300)") assert coerce(idx) == expected @pytest.mark.parametrize('dtype', [np.int64, np.float64]) @pytest.mark.parametrize('delta', [1, 
0, -1]) def test_addsub_arithmetic(self, dtype, delta): # GH 8142 delta = dtype(delta) idx = pd.Index([10, 11, 12], dtype=dtype) result = idx + delta expected = pd.Index(idx.values + delta, dtype=dtype) tm.assert_index_equal(result, expected) # this subtraction used to fail result = idx - delta expected = pd.Index(idx.values - delta, dtype=dtype) tm.assert_index_equal(result, expected) tm.assert_index_equal(idx + idx, 2 * idx) tm.assert_index_equal(idx - idx, 0 * idx) assert not (idx - idx).empty class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ # in py2 and py3 because ints and strings are uncomparable in py3 # (GH 13514) _holder = Index def setup_method(self, method): self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c'])) self.setup_indices() def create_index(self): return self.mixedIndex def test_argsort(self): idx = self.create_index() if PY36: with tm.assert_raises_regex(TypeError, "'>|<' not supported"): result = idx.argsort() elif PY3: with tm.assert_raises_regex(TypeError, "unorderable types"): result = idx.argsort() else: result = idx.argsort() expected = np.array(idx).argsort() tm.assert_numpy_array_equal(result, expected, check_dtype=False) def test_numpy_argsort(self): idx = self.create_index() if PY36: with tm.assert_raises_regex(TypeError, "'>|<' not supported"): result = np.argsort(idx) elif PY3: with tm.assert_raises_regex(TypeError, "unorderable types"): result = np.argsort(idx) else: result = np.argsort(idx) expected = idx.argsort() tm.assert_numpy_array_equal(result, expected) def test_copy_name(self): # Check that "name" argument passed at initialization is honoured # GH12309 idx = self.create_index() first = idx.__class__(idx, copy=True, name='mario') second = first.__class__(first, copy=False) # Even though "copy=False", we want a new object. 
assert first is not second # Not using tm.assert_index_equal() since names differ: assert idx.equals(first) assert first.name == 'mario' assert second.name == 'mario' s1 = Series(2, index=first) s2 = Series(3, index=second[:-1]) warning_type = RuntimeWarning if PY3 else None with tm.assert_produces_warning(warning_type): # Python 3: Unorderable types s3 = s1 * s2 assert s3.index.name == 'mario' def test_copy_name2(self): # Check that adding a "name" parameter to the copy is honored # GH14302 idx = pd.Index([1, 2], name='MyName') idx1 = idx.copy() assert idx.equals(idx1) assert idx.name == 'MyName' assert idx1.name == 'MyName' idx2 = idx.copy(name='NewName') assert idx.equals(idx2) assert idx.name == 'MyName' assert idx2.name == 'NewName' idx3 = idx.copy(names=['NewName']) assert idx.equals(idx3) assert idx.name == 'MyName' assert idx.names == ['MyName'] assert idx3.name == 'NewName' assert idx3.names == ['NewName'] def test_union_base(self): idx = self.create_index() first = idx[3:] second = idx[:5] if PY3: with tm.assert_produces_warning(RuntimeWarning): # unorderable types result = first.union(second) expected = Index(['b', 2, 'c', 0, 'a', 1]) tm.assert_index_equal(result, expected) else: result = first.union(second) expected = Index(['b', 2, 'c', 0, 'a', 1]) tm.assert_index_equal(result, expected) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: if PY3: with tm.assert_produces_warning(RuntimeWarning): # unorderable types result = first.union(case) assert tm.equalContents(result, idx) else: result = first.union(case) assert tm.equalContents(result, idx) def test_intersection_base(self): # (same results for py2 and py3 but sortedness not tested elsewhere) idx = self.create_index() first = idx[:5] second = idx[:3] result = first.intersection(second) expected = Index([0, 'a', 1]) tm.assert_index_equal(result, expected) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: result = first.intersection(case) assert tm.equalContents(result, second) def test_difference_base(self): # (same results for py2 and py3 but sortedness not tested elsewhere) idx = self.create_index() first = idx[:4] second = idx[3:] result = first.difference(second) expected = Index([0, 1, 'a']) tm.assert_index_equal(result, expected) def test_symmetric_difference(self): # (same results for py2 and py3 but sortedness not tested elsewhere) idx = self.create_index() first = idx[:4] second = idx[3:] result = first.symmetric_difference(second) expected = Index([0, 1, 2, 'a', 'c']) tm.assert_index_equal(result, expected) def test_logical_compat(self): idx = self.create_index() assert idx.all() == idx.values.all() assert idx.any() == idx.values.any() def test_dropna(self): # GH 6194 for dtype in [None, object, 'category']: idx = pd.Index([1, 2, 3], dtype=dtype) tm.assert_index_equal(idx.dropna(), idx) idx = pd.Index([1., 2., 3.], dtype=dtype) tm.assert_index_equal(idx.dropna(), idx) nanidx = pd.Index([1., 2., np.nan, 3.], dtype=dtype) tm.assert_index_equal(nanidx.dropna(), idx) idx = pd.Index(['A', 'B', 'C'], dtype=dtype) tm.assert_index_equal(idx.dropna(), idx) nanidx = pd.Index(['A', np.nan, 'B', 'C'], dtype=dtype) tm.assert_index_equal(nanidx.dropna(), idx) tm.assert_index_equal(nanidx.dropna(how='any'), idx) tm.assert_index_equal(nanidx.dropna(how='all'), idx) idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03']) tm.assert_index_equal(idx.dropna(), idx) nanidx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 
'2011-01-03', pd.NaT]) tm.assert_index_equal(nanidx.dropna(), idx) idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days']) tm.assert_index_equal(idx.dropna(), idx) nanidx = pd.TimedeltaIndex([pd.NaT, '1 days', '2 days', '3 days', pd.NaT]) tm.assert_index_equal(nanidx.dropna(), idx) idx = pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M') tm.assert_index_equal(idx.dropna(), idx) nanidx = pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'], freq='M') tm.assert_index_equal(nanidx.dropna(), idx) msg = "invalid how option: xxx" with tm.assert_raises_regex(ValueError, msg): pd.Index([1, 2, 3]).dropna(how='xxx') def test_get_combined_index(self): result = _get_combined_index([]) tm.assert_index_equal(result, Index([])) def test_repeat(self): repeats = 2 idx = pd.Index([1, 2, 3]) expected = pd.Index([1, 1, 2, 2, 3, 3]) result = idx.repeat(repeats) tm.assert_index_equal(result, expected) with tm.assert_produces_warning(FutureWarning): result = idx.repeat(n=repeats) tm.assert_index_equal(result, expected) def test_is_monotonic_na(self): examples = [pd.Index([np.nan]), pd.Index([np.nan, 1]), pd.Index([1, 2, np.nan]), pd.Index(['a', 'b', np.nan]), pd.to_datetime(['NaT']), pd.to_datetime(['NaT', '2000-01-01']), pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']), pd.to_timedelta(['1 day', 'NaT']), ] for index in examples: assert not index.is_monotonic_increasing assert not index.is_monotonic_decreasing assert not index._is_strictly_monotonic_increasing assert not index._is_strictly_monotonic_decreasing def test_repr_summary(self): with cf.option_context('display.max_seq_items', 10): r = repr(pd.Index(np.arange(1000))) assert len(r) < 200 assert "..." in r def test_int_name_format(self): index = Index(['a', 'b', 'c'], name=0) s = Series(lrange(3), index) df = DataFrame(lrange(3), index=index) repr(s) repr(df) def test_print_unicode_columns(self): df = pd.DataFrame({u("\u05d0"): [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]}) repr(df.columns) # should not raise UnicodeDecodeError def test_unicode_string_with_unicode(self): idx = Index(lrange(1000)) if PY3: str(idx) else: text_type(idx) def test_bytestring_with_unicode(self): idx = Index(lrange(1000)) if PY3: bytes(idx) else: str(idx) def test_intersect_str_dates(self): dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] i1 = Index(dt_dates, dtype=object) i2 = Index(['aa'], dtype=object) res = i2.intersection(i1) assert len(res) == 0 class TestIndexUtils(object): @pytest.mark.parametrize('data, names, expected', [ ([[1, 2, 3]], None, Index([1, 2, 3])), ([[1, 2, 3]], ['name'], Index([1, 2, 3], name='name')), ([['a', 'a'], ['c', 'd']], None, MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]])), ([['a', 'a'], ['c', 'd']], ['L1', 'L2'], MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]], names=['L1', 'L2'])), ]) def test_ensure_index_from_sequences(self, data, names, expected): result = _ensure_index_from_sequences(data, names) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt']) def test_generated_op_names(opname, indices): index = indices opname = '__{name}__'.format(name=opname) method = getattr(index, opname) assert method.__name__ == opname
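# --- Editor's illustrative sketch (not part of the original test file) ---
# A minimal, hedged usage example distilled from the get_indexer / take
# expectations exercised in the tests above.  It assumes a pandas version
# compatible with those tests; exact output formatting may differ across
# releases, and the variable names below are chosen for illustration only.
import numpy as np
import pandas as pd

idx = pd.Index(np.arange(10))

# 'nearest' matching picks the closest existing label for each target value.
print(idx.get_indexer([0.2, 1.8, 8.5], method='nearest'))
# expected (per the tests above): [0 2 9]

# Adding a tolerance turns matches that are "too far" into -1 (missing).
print(idx.get_indexer([0.2, 1.8, 8.5], method='nearest', tolerance=0.2))
# expected (per the tests above): [0 2 -1]

# take() with fill_value=True maps -1 to a missing value instead of
# wrapping around to the last element.
letters = pd.Index(list('ABC'), name='xxx')
print(letters.take(np.array([1, 0, -1])))                   # ['B', 'A', 'C']
print(letters.take(np.array([1, 0, -1]), fill_value=True))  # ['B', 'A', nan]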
bsd-3-clause
pythonvietnam/scikit-learn
sklearn/neighbors/approximate.py
71
22357
"""Approximate nearest neighbor search""" # Author: Maheshakya Wijewardena <[email protected]> # Joel Nothman <[email protected]> import numpy as np import warnings from scipy import sparse from .base import KNeighborsMixin, RadiusNeighborsMixin from ..base import BaseEstimator from ..utils.validation import check_array from ..utils import check_random_state from ..metrics.pairwise import pairwise_distances from ..random_projection import GaussianRandomProjection __all__ = ["LSHForest"] HASH_DTYPE = '>u4' MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8 def _find_matching_indices(tree, bin_X, left_mask, right_mask): """Finds indices in sorted array of integers. Most significant h bits in the binary representations of the integers are matched with the items' most significant h bits. """ left_index = np.searchsorted(tree, bin_X & left_mask) right_index = np.searchsorted(tree, bin_X | right_mask, side='right') return left_index, right_index def _find_longest_prefix_match(tree, bin_X, hash_size, left_masks, right_masks): """Find the longest prefix match in tree for each query in bin_X Most significant bits are considered as the prefix. """ hi = np.empty_like(bin_X, dtype=np.intp) hi.fill(hash_size) lo = np.zeros_like(bin_X, dtype=np.intp) res = np.empty_like(bin_X, dtype=np.intp) left_idx, right_idx = _find_matching_indices(tree, bin_X, left_masks[hi], right_masks[hi]) found = right_idx > left_idx res[found] = lo[found] = hash_size r = np.arange(bin_X.shape[0]) kept = r[lo < hi] # indices remaining in bin_X mask while kept.shape[0]: mid = (lo.take(kept) + hi.take(kept)) // 2 left_idx, right_idx = _find_matching_indices(tree, bin_X.take(kept), left_masks[mid], right_masks[mid]) found = right_idx > left_idx mid_found = mid[found] lo[kept[found]] = mid_found + 1 res[kept[found]] = mid_found hi[kept[~found]] = mid[~found] kept = r[lo < hi] return res class ProjectionToHashMixin(object): """Turn a transformed real-valued array into a hash""" @staticmethod def _to_hash(projected): if projected.shape[1] % 8 != 0: raise ValueError('Require reduced dimensionality to be a multiple ' 'of 8 for hashing') # XXX: perhaps non-copying operation better out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE) return out.reshape(projected.shape[0], -1) def fit_transform(self, X, y=None): self.fit(X) return self.transform(X) def transform(self, X, y=None): return self._to_hash(super(ProjectionToHashMixin, self).transform(X)) class GaussianRandomProjectionHash(ProjectionToHashMixin, GaussianRandomProjection): """Use GaussianRandomProjection to produce a cosine LSH fingerprint""" def __init__(self, n_components=8, random_state=None): super(GaussianRandomProjectionHash, self).__init__( n_components=n_components, random_state=random_state) def _array_of_arrays(list_of_arrays): """Creates an array of array from list of arrays.""" out = np.empty(len(list_of_arrays), dtype=object) out[:] = list_of_arrays return out class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin): """Performs approximate nearest neighbor search using LSH forest. LSH Forest: Locality Sensitive Hashing forest [1] is an alternative method for vanilla approximate nearest neighbor search methods. LSH forest data structure has been implemented using sorted arrays and binary search and 32 bit fixed-length hashes. Random projection is used as the hash family which approximates cosine distance. 
The cosine distance is defined as ``1 - cosine_similarity``: the lowest value is 0 (identical point) but it is bounded above by 2 for the farthest points. Its value does not depend on the norm of the vector points but only on their relative angles. Read more in the :ref:`User Guide <approximate_nearest_neighbors>`. Parameters ---------- n_estimators : int (default = 10) Number of trees in the LSH Forest. min_hash_match : int (default = 4) lowest hash length to be searched when candidate selection is performed for nearest neighbors. n_candidates : int (default = 10) Minimum number of candidates evaluated per estimator, assuming enough items meet the `min_hash_match` constraint. n_neighbors : int (default = 5) Number of neighbors to be returned from query function when it is not provided to the :meth:`kneighbors` method. radius : float, optinal (default = 1.0) Radius from the data point to its neighbors. This is the parameter space to use by default for the :meth`radius_neighbors` queries. radius_cutoff_ratio : float, optional (default = 0.9) A value ranges from 0 to 1. Radius neighbors will be searched until the ratio between total neighbors within the radius and the total candidates becomes less than this value unless it is terminated by hash length reaching `min_hash_match`. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- hash_functions_ : list of GaussianRandomProjectionHash objects Hash function g(p,x) for a tree is an array of 32 randomly generated float arrays with the same dimenstion as the data set. This array is stored in GaussianRandomProjectionHash object and can be obtained from ``components_`` attribute. trees_ : array, shape (n_estimators, n_samples) Each tree (corresponding to a hash function) contains an array of sorted hashed values. The array representation may change in future versions. original_indices_ : array, shape (n_estimators, n_samples) Original indices of sorted hashed values in the fitted index. References ---------- .. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning Indexes for Similarity Search", WWW '05 Proceedings of the 14th international conference on World Wide Web, 651-660, 2005. Examples -------- >>> from sklearn.neighbors import LSHForest >>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]] >>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]] >>> lshf = LSHForest() >>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10, n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9, random_state=None) >>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2) >>> distances # doctest: +ELLIPSIS array([[ 0.069..., 0.149...], [ 0.229..., 0.481...], [ 0.004..., 0.014...]]) >>> indices array([[1, 2], [2, 0], [4, 0]]) """ def __init__(self, n_estimators=10, radius=1.0, n_candidates=50, n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9, random_state=None): self.n_estimators = n_estimators self.radius = radius self.random_state = random_state self.n_candidates = n_candidates self.n_neighbors = n_neighbors self.min_hash_match = min_hash_match self.radius_cutoff_ratio = radius_cutoff_ratio def _compute_distances(self, query, candidates): """Computes the cosine distance. 
Distance is from the query to points in the candidates array. Returns argsort of distances in the candidates array and sorted distances. """ if candidates.shape == (0,): # needed since _fit_X[np.array([])] doesn't work if _fit_X sparse return np.empty(0, dtype=np.int), np.empty(0, dtype=float) if sparse.issparse(self._fit_X): candidate_X = self._fit_X[candidates] else: candidate_X = self._fit_X.take(candidates, axis=0, mode='clip') distances = pairwise_distances(query, candidate_X, metric='cosine')[0] distance_positions = np.argsort(distances) distances = distances.take(distance_positions, mode='clip', axis=0) return distance_positions, distances def _generate_masks(self): """Creates left and right masks for all hash lengths.""" tri_size = MAX_HASH_SIZE + 1 # Called once on fitting, output is independent of hashes left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:] right_mask = left_mask[::-1, ::-1] self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE) self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE) def _get_candidates(self, query, max_depth, bin_queries, n_neighbors): """Performs the Synchronous ascending phase. Returns an array of candidates, their distance ranks and distances. """ index_size = self._fit_X.shape[0] # Number of candidates considered including duplicates # XXX: not sure whether this is being calculated correctly wrt # duplicates from different iterations through a single tree n_candidates = 0 candidate_set = set() min_candidates = self.n_candidates * self.n_estimators while (max_depth > self.min_hash_match and (n_candidates < min_candidates or len(candidate_set) < n_neighbors)): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) n_candidates += stop - start candidate_set.update( self.original_indices_[i][start:stop].tolist()) max_depth -= 1 candidates = np.fromiter(candidate_set, count=len(candidate_set), dtype=np.intp) # For insufficient candidates, candidates are filled. # Candidates are filled from unselected indices uniformly. if candidates.shape[0] < n_neighbors: warnings.warn( "Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. Candidates are filled up" " uniformly from unselected" " indices." % (n_neighbors, self.min_hash_match)) remaining = np.setdiff1d(np.arange(0, index_size), candidates) to_fill = n_neighbors - candidates.shape[0] candidates = np.concatenate((candidates, remaining[:to_fill])) ranks, distances = self._compute_distances(query, candidates.astype(int)) return (candidates[ranks[:n_neighbors]], distances[:n_neighbors]) def _get_radius_neighbors(self, query, max_depth, bin_queries, radius): """Finds radius neighbors from the candidates obtained. Their distances from query are smaller than radius. Returns radius neighbors and distances. 
""" ratio_within_radius = 1 threshold = 1 - self.radius_cutoff_ratio total_candidates = np.array([], dtype=int) total_neighbors = np.array([], dtype=int) total_distances = np.array([], dtype=float) while (max_depth > self.min_hash_match and ratio_within_radius > threshold): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] candidates = [] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) candidates.extend( self.original_indices_[i][start:stop].tolist()) candidates = np.setdiff1d(candidates, total_candidates) total_candidates = np.append(total_candidates, candidates) ranks, distances = self._compute_distances(query, candidates) m = np.searchsorted(distances, radius, side='right') positions = np.searchsorted(total_distances, distances[:m]) total_neighbors = np.insert(total_neighbors, positions, candidates[ranks[:m]]) total_distances = np.insert(total_distances, positions, distances[:m]) ratio_within_radius = (total_neighbors.shape[0] / float(total_candidates.shape[0])) max_depth = max_depth - 1 return total_neighbors, total_distances def fit(self, X, y=None): """Fit the LSH forest on the data. This creates binary hashes of input data points by getting the dot product of input points and hash_function then transforming the projection into a binary string array based on the sign (positive/negative) of the projection. A sorted array of binary hashes is created. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- self : object Returns self. """ self._fit_X = check_array(X, accept_sparse='csr') # Creates a g(p,x) for each tree self.hash_functions_ = [] self.trees_ = [] self.original_indices_ = [] rng = check_random_state(self.random_state) int_max = np.iinfo(np.int32).max for i in range(self.n_estimators): # This is g(p,x) for a particular tree. # Builds a single tree. Hashing is done on an array of data points. # `GaussianRandomProjection` is used for hashing. # `n_components=hash size and n_features=n_dim. hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE, rng.randint(0, int_max)) hashes = hasher.fit_transform(self._fit_X)[:, 0] original_index = np.argsort(hashes) bin_hashes = hashes[original_index] self.original_indices_.append(original_index) self.trees_.append(bin_hashes) self.hash_functions_.append(hasher) self._generate_masks() return self def _query(self, X): """Performs descending phase to find maximum depth.""" # Calculate hashes of shape (n_samples, n_estimators, [hash_size]) bin_queries = np.asarray([hasher.transform(X)[:, 0] for hasher in self.hash_functions_]) bin_queries = np.rollaxis(bin_queries, 1) # descend phase depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE, self._left_mask, self._right_mask) for tree, tree_queries in zip(self.trees_, np.rollaxis(bin_queries, 1))] return bin_queries, np.max(depths, axis=0) def kneighbors(self, X, n_neighbors=None, return_distance=True): """Returns n_neighbors of approximate nearest neighbors. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. n_neighbors : int, opitonal (default = None) Number of neighbors required. If not provided, this will return the number specified at the initialization. 
return_distance : boolean, optional (default = False) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples, n_neighbors) Array representing the cosine distances to each point, only present if return_distance=True. ind : array, shape (n_samples, n_neighbors) Indices of the approximate nearest points in the population matrix. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if n_neighbors is None: n_neighbors = self.n_neighbors X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_candidates(X[[i]], max_depth[i], bin_queries[i], n_neighbors) neighbors.append(neighs) distances.append(dists) if return_distance: return np.array(distances), np.array(neighbors) else: return np.array(neighbors) def radius_neighbors(self, X, radius=None, return_distance=True): """Finds the neighbors within a given radius of a point or points. Return the indices and distances of some points from the dataset lying in a ball with size ``radius`` around the points of the query array. Points lying on the boundary are included in the results. The result points are *not* necessarily sorted by distance to their query point. LSH Forest being an approximate method, some true neighbors from the indexed dataset might be missing from the results. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. radius : float Limiting distance of neighbors to return. (default is the value passed to the constructor). return_distance : boolean, optional (default = False) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples,) of arrays Each element is an array representing the cosine distances to some points found within ``radius`` of the respective query. Only present if ``return_distance=True``. ind : array, shape (n_samples,) of arrays Each element is an array of indices for neighbors within ``radius`` of the respective query. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if radius is None: radius = self.radius X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i], bin_queries[i], radius) neighbors.append(neighs) distances.append(dists) if return_distance: return _array_of_arrays(distances), _array_of_arrays(neighbors) else: return _array_of_arrays(neighbors) def partial_fit(self, X, y=None): """ Inserts new data into the already fitted LSH Forest. Cost is proportional to new total size, so additions should be batched. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) New data point to be inserted into the LSH Forest. """ X = check_array(X, accept_sparse='csr') if not hasattr(self, 'hash_functions_'): return self.fit(X) if X.shape[1] != self._fit_X.shape[1]: raise ValueError("Number of features in X and" " fitted array does not match.") n_samples = X.shape[0] n_indexed = self._fit_X.shape[0] for i in range(self.n_estimators): bin_X = self.hash_functions_[i].transform(X)[:, 0] # gets the position to be added in the tree. positions = self.trees_[i].searchsorted(bin_X) # adds the hashed value into the tree. 
self.trees_[i] = np.insert(self.trees_[i], positions, bin_X) # add the entry into the original_indices_. self.original_indices_[i] = np.insert(self.original_indices_[i], positions, np.arange(n_indexed, n_indexed + n_samples)) # adds the entry into the input_array. if sparse.issparse(X) or sparse.issparse(self._fit_X): self._fit_X = sparse.vstack((self._fit_X, X)) else: self._fit_X = np.row_stack((self._fit_X, X)) return self
bsd-3-clause
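The record above is scikit-learn's LSHForest estimator (approximate nearest neighbours built from sorted arrays of random-projection hashes). Below is a minimal usage sketch, not part of the repository: it assumes an older scikit-learn release that still ships sklearn.neighbors.LSHForest (the class was later deprecated and removed), and the array shapes and parameter values are chosen purely for illustration.

import numpy as np
from sklearn.neighbors import LSHForest  # only available in older scikit-learn releases

rng = np.random.RandomState(42)
X_index = rng.randn(1000, 32)   # points to index
X_query = rng.randn(5, 32)      # query points

lshf = LSHForest(n_estimators=10, n_candidates=50, random_state=42)
lshf.fit(X_index)

# Approximate k-nearest neighbours under cosine distance, as in kneighbors() above.
distances, indices = lshf.kneighbors(X_query, n_neighbors=3)
print(distances.shape, indices.shape)   # (5, 3) (5, 3)

# Approximate radius query; each element of the result is a variable-length array.
radius_dist, radius_ind = lshf.radius_neighbors(X_query, radius=0.4)

# The index can also be grown incrementally, matching partial_fit() above.
lshf.partial_fit(rng.randn(100, 32))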
LohithBlaze/scikit-learn
sklearn/tests/test_common.py
127
7665
""" General tests for all estimators in sklearn. """ # Authors: Andreas Mueller <[email protected]> # Gael Varoquaux [email protected] # License: BSD 3 clause from __future__ import print_function import os import warnings import sys import pkgutil from sklearn.externals.six import PY3 from sklearn.utils.testing import assert_false, clean_warning_registry from sklearn.utils.testing import all_estimators from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_in from sklearn.utils.testing import ignore_warnings import sklearn from sklearn.cluster.bicluster import BiclusterMixin from sklearn.linear_model.base import LinearClassifierMixin from sklearn.utils.estimator_checks import ( _yield_all_checks, CROSS_DECOMPOSITION, check_parameters_default_constructible, check_class_weight_balanced_linear_classifier, check_transformer_n_iter, check_non_transformer_estimators_n_iter, check_get_params_invariance) def test_all_estimator_no_base_class(): # test that all_estimators doesn't find abstract classes. for name, Estimator in all_estimators(): msg = ("Base estimators such as {0} should not be included" " in all_estimators").format(name) assert_false(name.lower().startswith('base'), msg=msg) def test_all_estimators(): # Test that estimators are default-constructible, clonable # and have working repr. estimators = all_estimators(include_meta_estimators=True) # Meta sanity-check to make sure that the estimator introspection runs # properly assert_greater(len(estimators), 0) for name, Estimator in estimators: # some can just not be sensibly default constructed yield check_parameters_default_constructible, name, Estimator def test_non_meta_estimators(): # input validation etc for non-meta estimators estimators = all_estimators() for name, Estimator in estimators: if issubclass(Estimator, BiclusterMixin): continue if name.startswith("_"): continue for check in _yield_all_checks(name, Estimator): yield check, name, Estimator def test_configure(): # Smoke test the 'configure' step of setup, this tests all the # 'configure' functions in the setup.pys in the scikit cwd = os.getcwd() setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..')) setup_filename = os.path.join(setup_path, 'setup.py') if not os.path.exists(setup_filename): return try: os.chdir(setup_path) old_argv = sys.argv sys.argv = ['setup.py', 'config'] clean_warning_registry() with warnings.catch_warnings(): # The configuration spits out warnings when not finding # Blas/Atlas development headers warnings.simplefilter('ignore', UserWarning) if PY3: with open('setup.py') as f: exec(f.read(), dict(__name__='__main__')) else: execfile('setup.py', dict(__name__='__main__')) finally: sys.argv = old_argv os.chdir(cwd) def test_class_weight_balanced_linear_classifiers(): classifiers = all_estimators(type_filter='classifier') clean_warning_registry() with warnings.catch_warnings(record=True): linear_classifiers = [ (name, clazz) for name, clazz in classifiers if 'class_weight' in clazz().get_params().keys() and issubclass(clazz, LinearClassifierMixin)] for name, Classifier in linear_classifiers: if name == "LogisticRegressionCV": # Contrary to RidgeClassifierCV, LogisticRegressionCV use actual # CV folds and fit a model for each CV iteration before averaging # the coef. Therefore it is expected to not behave exactly as the # other linear model. 
continue yield check_class_weight_balanced_linear_classifier, name, Classifier @ignore_warnings def test_import_all_consistency(): # Smoke test to check that any name in a __all__ list is actually defined # in the namespace of the module or package. pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.', onerror=lambda _: None) submods = [modname for _, modname, _ in pkgs] for modname in submods + ['sklearn']: if ".tests." in modname: continue package = __import__(modname, fromlist="dummy") for name in getattr(package, '__all__', ()): if getattr(package, name, None) is None: raise AttributeError( "Module '{0}' has no attribute '{1}'".format( modname, name)) def test_root_import_all_completeness(): EXCEPTIONS = ('utils', 'tests', 'base', 'setup') for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__, onerror=lambda _: None): if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS: continue assert_in(modname, sklearn.__all__) def test_non_transformer_estimators_n_iter(): # Test that all estimators of type which are non-transformer # and which have an attribute of max_iter, return the attribute # of n_iter atleast 1. for est_type in ['regressor', 'classifier', 'cluster']: regressors = all_estimators(type_filter=est_type) for name, Estimator in regressors: # LassoLars stops early for the default alpha=1.0 for # the iris dataset. if name == 'LassoLars': estimator = Estimator(alpha=0.) else: estimator = Estimator() if hasattr(estimator, "max_iter"): # These models are dependent on external solvers like # libsvm and accessing the iter parameter is non-trivial. if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC', 'RidgeClassifier', 'SVC', 'RandomizedLasso', 'LogisticRegressionCV']): continue # Tested in test_transformer_n_iter below elif (name in CROSS_DECOMPOSITION or name in ['LinearSVC', 'LogisticRegression']): continue else: # Multitask models related to ENet cannot handle # if y is mono-output. yield (check_non_transformer_estimators_n_iter, name, estimator, 'Multi' in name) def test_transformer_n_iter(): transformers = all_estimators(type_filter='transformer') for name, Estimator in transformers: estimator = Estimator() # Dependent on external solvers and hence accessing the iter # param is non-trivial. external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding', 'RandomizedLasso', 'LogisticRegressionCV'] if hasattr(estimator, "max_iter") and name not in external_solver: yield check_transformer_n_iter, name, estimator def test_get_params_invariance(): # Test for estimators that support get_params, that # get_params(deep=False) is a subset of get_params(deep=True) # Related to issue #4465 estimators = all_estimators(include_meta_estimators=False, include_other=True) for name, Estimator in estimators: if hasattr(Estimator, 'get_params'): yield check_get_params_invariance, name, Estimator
bsd-3-clause
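The test module above drives scikit-learn's generic estimator checks through nose-style yield generators. As a rough sketch of how one of those checks can be run directly, outside the test runner, assuming the same era of scikit-learn where these helpers live in sklearn.utils.testing and sklearn.utils.estimator_checks:

from sklearn.utils.testing import all_estimators
from sklearn.utils.estimator_checks import check_parameters_default_constructible

# Each tuple yielded by the tests above is simply (check, name, Estimator), so a
# single check can be invoked by hand on every discovered estimator.
for name, Estimator in all_estimators():
    check_parameters_default_constructible(name, Estimator)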
katyhuff/moose
modules/porous_flow/doc/tests/sinks.py
14
7648
#!/usr/bin/env python import os import sys import numpy as np import matplotlib.pyplot as plt def expected_s01(t): bulk = 1.3 dens0 = 1.1 rate = -6 por = 0.1 area = 0.5 vol = 0.25 p0 = 1 initial_mass = vol * por * dens0 * np.exp(p0 / bulk) return initial_mass + rate * area * t def expected_s02(t): # rho = rho0*exp(rate*area*perm*t/visc/vol/por) bulk = 1.3 dens0 = 1.1 rate = -6 por = 0.1 area = 0.5 vol = 0.25 p0 = 1 visc = 1.1 perm = 0.2 initial_dens = dens0 * np.exp(p0 / bulk) return vol * por * initial_dens * np.exp(rate * area * perm * t / visc / vol / por) def expected_s03(s): rate = 6 area = 0.5 return rate * area * s * s def expected_s04(p): return [8 * min(max(pp + 0.2, 0.5), 1) for pp in p] def expected_s05(p): fcn = 6 center = 0.9 sd = 0.5 return [fcn * np.exp(-0.5 * pow(min((pp - center) / sd, 0), 2)) for pp in p] def expected_s06(p): fcn = 3 center = 0.9 cutoff = -0.8 xx = p - center return [fcn if (x >= 0) else ( 0 if (x <= cutoff) else fcn / pow(cutoff, 3) * (2 * x + cutoff) * pow(x - cutoff, 2)) for x in xx] def expected_s07(f): rate = 6 area = 0.5 return rate * area * f def expected_s08(pc): mass_frac = 0.8 rate = 100 area = 0.5 al = 1.1 m = 0.5 sg = 1 - pow(1.0 + pow(al * pc, 1.0 / (1.0 - m)), -m) return rate * area * mass_frac * sg * sg def s01(): f = open("../../tests/sinks/gold/s01.csv") data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()] m00 = [d[2] for d in data] t = [d[0] for d in data] return (t, m00) def s02(): f = open("../../tests/sinks/gold/s02.csv") data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()] m00 = [d[1] for d in data] t = [d[0] for d in data] return (t, m00) def s03(): f = open("../../tests/sinks/gold/s03.csv") data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()] rate00 = [d[5] for d in data] s = [d[10] for d in data] return (s, rate00) def s04(): f = open("../../tests/sinks/gold/s04.csv") data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()] rate10 = [d[3] for d in data] p = [d[9] for d in data] return (p, rate10) def s05(): f = open("../../tests/sinks/gold/s05.csv") data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()] rate10 = [2*d[1] for d in data] p = [d[10] for d in data] return (p, rate10) def s06(): f = open("../../tests/sinks/gold/s06.csv") data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()] prate = sorted(list(set([(d[11], d[1]) for d in data] + [(d[12], d[2]) for d in data] + [(d[13], d[3]) for d in data] + [(d[14], d[4]) for d in data])), key = lambda x: x[0]) return zip(*prate) def s07(): f = open("../../tests/sinks/gold/s07.csv") data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()] massfrac = [d[1] for d in data] flux = [d[4] for d in data] return (massfrac, flux) def s08(): f = open("../../tests/sinks/gold/s08.csv") data = [map(float, line.strip().split(",")) for line in f.readlines()[1:] if line.strip()] rate00 = [d[1] for d in data] pc = [(d[6] - d[7]) for d in data] return (pc, rate00) plt.figure() moose_results = s01() mooset = moose_results[0] moosem = moose_results[1] delt = (mooset[-1] - mooset[0])/100 tpoints = np.arange(mooset[0] - delt, mooset[-1] + delt, delt) plt.plot(tpoints, expected_s01(tpoints), 'k-', linewidth = 3.0, label = 'expected') plt.plot(mooset, moosem, 'rs', markersize = 10.0, label = 'MOOSE') plt.legend(loc = 'upper right') plt.xlabel("t (s)") 
plt.ylabel("Nodal mass (kg)") plt.title("Basic sink") plt.savefig("s01.pdf") plt.figure() moose_results = s02() mooset = moose_results[0] moosem = moose_results[1] delt = (mooset[-1] - mooset[0])/100 tpoints = np.arange(mooset[0] - delt, mooset[-1] + delt, delt) plt.plot(tpoints, expected_s02(tpoints), 'k-', linewidth = 3.0, label = 'expected') plt.plot(mooset, moosem, 'rs', markersize = 10.0, label = 'MOOSE') plt.legend(loc = 'upper right') plt.xlabel("t (s)") plt.ylabel("Nodal mass (kg)") plt.title("Basic sink with mobility multiplier") plt.savefig("s02.pdf") plt.figure() moose_results = s03() mooses = moose_results[0] mooser = moose_results[1] dels = (mooses[0] - mooses[-1])/100 spoints = np.arange(mooses[-1] - dels, mooses[0] + dels, dels) plt.plot(spoints, expected_s03(spoints), 'k-', linewidth = 3.0, label = 'expected') plt.plot(mooses, mooser, 'rs', markersize = 10.0, label = 'MOOSE') plt.legend(loc = 'upper right') plt.xlabel("Saturation") plt.ylabel("Sink rate") plt.title("Basic sink with relative-permeability multiplier") plt.savefig("s03.pdf") plt.figure() moose_results = s04() moosep = moose_results[0] mooser = moose_results[1] delp = (moosep[0] - moosep[-1])/100 ppoints = np.arange(moosep[-1] - delp, moosep[0] + delp, delp) plt.plot(ppoints, expected_s04(ppoints), 'k-', linewidth = 3.0, label = 'expected') plt.plot(moosep, mooser, 'rs', markersize = 10.0, label = 'MOOSE') plt.legend(loc = 'lower right') plt.xlabel("Porepressure (Pa)") plt.ylabel("Sink rate (kg/m^2/s)") plt.title("Piecewise-linear sink") plt.axis([0.1, 1, 3.9, 8.1]) plt.savefig("s04.pdf") plt.figure() moose_results = s05() moosep = moose_results[0] mooser = moose_results[1] delp = (moosep[0] - moosep[-1])/100 ppoints = np.arange(moosep[-1] - delp, moosep[0] + delp, delp) plt.plot(ppoints, expected_s05(ppoints), 'k-', linewidth = 3.0, label = 'expected') plt.plot(moosep, mooser, 'rs', markersize = 10.0, label = 'MOOSE') plt.legend(loc = 'lower right') plt.xlabel("Porepressure (Pa)") plt.ylabel("Sink rate (kg/m^2/s)") plt.title("Half-Gaussian sink") plt.axis([-0.4, 1.2, 0, 6.1]) plt.savefig("s05.pdf") plt.figure() moose_results = s06() moosep = moose_results[0] mooser = moose_results[1] delp = (moosep[-1] - moosep[0])/100 ppoints = np.arange(moosep[0] - delp, moosep[-1] + delp, delp) plt.plot(ppoints, expected_s06(ppoints), 'k-', linewidth = 3.0, label = 'expected') plt.plot(moosep, mooser, 'rs', markersize = 10.0, label = 'MOOSE') plt.legend(loc = 'lower right') plt.xlabel("Porepressure (Pa)") plt.ylabel("Sink rate (kg/m^2/s)") plt.title("Half-cubic sink") plt.axis([-0.1, 1.3, -0.1, 3.1]) plt.savefig("s06.pdf") plt.figure() moose_results = s07() moosefrac = moose_results[0] mooseflux = moose_results[1] delfrac = (moosefrac[0] - moosefrac[-1])/100 fpoints = np.arange(moosefrac[-1] - delfrac, moosefrac[0] + delfrac, delfrac) plt.plot(fpoints, expected_s07(fpoints), 'k-', linewidth = 3.0, label = 'expected') plt.plot(moosefrac, mooseflux, 'rs', markersize = 10.0, label = 'MOOSE') plt.legend(loc = 'lower right') plt.xlabel("Mass fraction") plt.ylabel("Sink rate (kg/m^2/s)") plt.title("Mass-fraction dependent sink") plt.savefig("s07.pdf") plt.figure() moose_results = s08() moosepc = moose_results[0] mooser = moose_results[1] delpc = (moosepc[0] - moosepc[-1])/100 pcpoints = np.arange(moosepc[-1] - dels, moosepc[0] + dels, dels) plt.plot(pcpoints, expected_s08(pcpoints), 'k-', linewidth = 3.0, label = 'expected') plt.plot(moosepc, mooser, 'rs', markersize = 10.0, label = 'MOOSE') plt.legend(loc = 'lower right') 
plt.xlabel("Capillary pressure (Pa)") plt.ylabel("Sink rate (kg/m^2/s)") plt.title("Mass-fraction and relative-permeability dependent sink (2 phase, 3 comp)") plt.savefig("s08.pdf") sys.exit(0)
lgpl-2.1
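The script above compares MOOSE porous-flow sink test output (the gold CSV files) against analytic expressions, using Python 2 idioms such as list-returning map and zip. As a self-contained check of the linear mass-balance formula in expected_s01(), the sketch below repeats the constants hard-coded in that function; the time range is illustrative and nothing here reads the MOOSE output files.

import numpy as np

bulk, dens0, rate = 1.3, 1.1, -6.0
por, area, vol, p0 = 0.1, 0.5, 0.25, 1.0

# A constant sink of strength `rate` applied over `area` removes mass linearly,
# starting from the initial fluid mass vol * por * dens0 * exp(p0 / bulk).
initial_mass = vol * por * dens0 * np.exp(p0 / bulk)
t = np.linspace(0.0, 0.01, 5)
mass = initial_mass + rate * area * t
print(mass)   # mass drops linearly from roughly 0.059 kg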
ycaihua/scikit-learn
examples/manifold/plot_manifold_sphere.py
258
5101
#!/usr/bin/python # -*- coding: utf-8 -*- """ ============================================= Manifold Learning methods on a severed sphere ============================================= An application of the different :ref:`manifold` techniques on a spherical data-set. Here one can see the use of dimensionality reduction in order to gain some intuition regarding the manifold learning methods. Regarding the dataset, the poles are cut from the sphere, as well as a thin slice down its side. This enables the manifold learning techniques to 'spread it open' whilst projecting it onto two dimensions. For a similar example, where the methods are applied to the S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py` Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is to find a low-dimensional representation of the data (here 2D) in which the distances respect well the distances in the original high-dimensional space, unlike other manifold-learning algorithms, it does not seeks an isotropic representation of the data in the low-dimensional space. Here the manifold problem matches fairly that of representing a flat map of the Earth, as with `map projection <http://en.wikipedia.org/wiki/Map_projection>`_ """ # Author: Jaques Grobler <[email protected]> # License: BSD 3 clause print(__doc__) from time import time import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.ticker import NullFormatter from sklearn import manifold from sklearn.utils import check_random_state # Next line to silence pyflakes. Axes3D # Variables for manifold learning. n_neighbors = 10 n_samples = 1000 # Create our sphere. random_state = check_random_state(0) p = random_state.rand(n_samples) * (2 * np.pi - 0.55) t = random_state.rand(n_samples) * np.pi # Sever the poles from the sphere. indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8)))) colors = p[indices] x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \ np.sin(t[indices]) * np.sin(p[indices]), \ np.cos(t[indices]) # Plot our dataset. fig = plt.figure(figsize=(15, 8)) plt.suptitle("Manifold Learning with %i points, %i neighbors" % (1000, n_neighbors), fontsize=14) ax = fig.add_subplot(251, projection='3d') ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow) try: # compatibility matplotlib < 1.0 ax.view_init(40, -10) except: pass sphere_data = np.array([x, y, z]).T # Perform Locally Linear Embedding Manifold learning methods = ['standard', 'ltsa', 'hessian', 'modified'] labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE'] for i, method in enumerate(methods): t0 = time() trans_data = manifold\ .LocallyLinearEmbedding(n_neighbors, 2, method=method).fit_transform(sphere_data).T t1 = time() print("%s: %.2g sec" % (methods[i], t1 - t0)) ax = fig.add_subplot(252 + i) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("%s (%.2g sec)" % (labels[i], t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform Isomap Manifold learning. t0 = time() trans_data = manifold.Isomap(n_neighbors, n_components=2)\ .fit_transform(sphere_data).T t1 = time() print("%s: %.2g sec" % ('ISO', t1 - t0)) ax = fig.add_subplot(257) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform Multi-dimensional scaling. 
t0 = time() mds = manifold.MDS(2, max_iter=100, n_init=1) trans_data = mds.fit_transform(sphere_data).T t1 = time() print("MDS: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(258) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("MDS (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform Spectral Embedding. t0 = time() se = manifold.SpectralEmbedding(n_components=2, n_neighbors=n_neighbors) trans_data = se.fit_transform(sphere_data).T t1 = time() print("Spectral Embedding: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(259) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') # Perform t-distributed stochastic neighbor embedding. t0 = time() tsne = manifold.TSNE(n_components=2, init='pca', random_state=0) trans_data = tsne.fit_transform(sphere_data).T t1 = time() print("t-SNE: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(2, 5, 10) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("t-SNE (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis('tight') plt.show()
bsd-3-clause
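The gallery example above builds the full multi-panel comparison figure. Below is a trimmed-down sketch for a single method that reuses the same severed-sphere construction and the same positional (n_neighbors, n_components) call style; the smaller sample count is chosen only for brevity.

import numpy as np
from sklearn import manifold
from sklearn.utils import check_random_state

rng = check_random_state(0)
n_samples = 500
p = rng.rand(n_samples) * (2 * np.pi - 0.55)
t = rng.rand(n_samples) * np.pi
keep = (t > np.pi / 8) & (t < np.pi - np.pi / 8)   # cut the poles, as in the example
sphere = np.array([np.sin(t[keep]) * np.cos(p[keep]),
                   np.sin(t[keep]) * np.sin(p[keep]),
                   np.cos(t[keep])]).T

# Two-dimensional Isomap embedding of the severed sphere.
embedding = manifold.Isomap(10, n_components=2).fit_transform(sphere)
print(embedding.shape)   # (number of kept points, 2)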
asteca/ASteCA
packages/out/cornerPlot.py
1
12939
import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Ellipse from scipy.ndimage import gaussian_filter from matplotlib.colors import LinearSegmentedColormap, colorConverter from .prep_plots import SigmaEllipse def hist2d( ax, x, y, bins=20, range=None, weights=None, levels=None, smooth=None, color=None, quiet=True, plot_datapoints=True, plot_density=True, plot_contours=True, no_fill_contours=False, fill_contours=False, contour_kwargs=None, contourf_kwargs=None, data_kwargs=None, **kwargs): """ Plot a 2-D histogram of samples. Source: https://corner.readthedocs.io Copyright (c) 2013-2016 Daniel Foreman-Mackey Parameters ---------- x : array_like[nsamples,] The samples. y : array_like[nsamples,] The samples. quiet : bool If true, suppress warnings for small datasets. levels : array_like The contour levels to draw. ax : matplotlib.Axes A axes instance on which to add the 2-D histogram. plot_datapoints : bool Draw the individual data points. plot_density : bool Draw the density colormap. plot_contours : bool Draw the contours. no_fill_contours : bool Add no filling at all to the contours (unlike setting ``fill_contours=False``, which still adds a white fill at the densest points). fill_contours : bool Fill the contours. contour_kwargs : dict Any additional keyword arguments to pass to the `contour` method. contourf_kwargs : dict Any additional keyword arguments to pass to the `contourf` method. data_kwargs : dict Any additional keyword arguments to pass to the `plot` method when adding the individual data points. """ # Set the default range based on the data range if not provided. if range is None: range = [[x.min(), x.max()], [y.min(), y.max()]] # Set up the default plotting arguments. if color is None: color = "k" # Choose the default "sigma" contour levels. if levels is None: levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2) # This is the color map for the density plot, over-plotted to indicate the # density of the points near the center. density_cmap = LinearSegmentedColormap.from_list( "density_cmap", [color, (1, 1, 1, 0)]) # This color map is used to hide the points at the high density areas. white_cmap = LinearSegmentedColormap.from_list( "white_cmap", [(1, 1, 1), (1, 1, 1)], N=2) # This "color map" is the list of colors for the contour levels if the # contours are filled. rgba_color = colorConverter.to_rgba(color) contour_cmap = [list(rgba_color) for l in levels] + [rgba_color] for i, l in enumerate(levels): contour_cmap[i][-1] *= float(i) / (len(levels) + 1) # We'll make the 2D histogram to directly estimate the density. try: H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins, range=list(map(np.sort, range)), weights=weights) except ValueError: print(" It looks like at least one of your sample columns " " have no dynamic range. You could try using the " " 'range' argument.") return if smooth is not None: H = gaussian_filter(H, smooth) if plot_contours or plot_density: # Compute the density levels. Hflat = H.flatten() inds = np.argsort(Hflat)[::-1] Hflat = Hflat[inds] sm = np.cumsum(Hflat) sm /= sm[-1] V = np.empty(len(levels)) for i, v0 in enumerate(levels): try: V[i] = Hflat[sm <= v0][-1] except: V[i] = Hflat[0] V.sort() m = np.diff(V) == 0 if np.any(m) and not quiet: print(" Too few points to create valid contours") while np.any(m): V[np.where(m)[0][0]] *= 1.0 - 1e-4 m = np.diff(V) == 0 V.sort() # Compute the bin centers. 
X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1]) # Extend the array for the sake of the contours at the plot edges. H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4)) H2[2:-2, 2:-2] = H H2[2:-2, 1] = H[:, 0] H2[2:-2, -2] = H[:, -1] H2[1, 2:-2] = H[0] H2[-2, 2:-2] = H[-1] H2[1, 1] = H[0, 0] H2[1, -2] = H[0, -1] H2[-2, 1] = H[-1, 0] H2[-2, -2] = H[-1, -1] X2 = np.concatenate([ X1[0] + np.array([-2, -1]) * np.diff(X1[:2]), X1, X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]), ]) Y2 = np.concatenate([ Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]), Y1, Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]), ]) if plot_datapoints: if data_kwargs is None: data_kwargs = dict() data_kwargs["color"] = data_kwargs.get("color", color) data_kwargs["ms"] = data_kwargs.get("ms", 2.0) data_kwargs["mec"] = data_kwargs.get("mec", "none") data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1) ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs) # Plot the base fill to hide the densest data points. if (plot_contours or plot_density) and not no_fill_contours: ax.contourf(X2, Y2, H2.T, [V.min(), H.max()], cmap=white_cmap, antialiased=False) if plot_contours and fill_contours: if contourf_kwargs is None: contourf_kwargs = dict() contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap) contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased", False) ax.contourf( X2, Y2, H2.T, np.concatenate([[0], V, [H.max() * (1 + 1e-4)]]), **contourf_kwargs) # Plot the density map. This can't be plotted at the same time as the # contour fills. elif plot_density: ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap) # Plot the contour edge colors. if plot_contours: if contour_kwargs is None: contour_kwargs = dict() contour_kwargs["colors"] = contour_kwargs.get("colors", color) ax.contour(X2, Y2, H2.T, V, **contour_kwargs) # ax.set_xlim(range[0]) # ax.set_ylim(range[1]) def pl_2_param_dens(_2_params, gs, min_max_p2, varIdxs, params_trace): ''' Parameter vs parameters density map. ''' plot_dict = { 'metal-age': [0, 2, 2, 4, 0, 1], 'metal-ext': [0, 2, 4, 6, 0, 2], 'metal-dist': [0, 2, 6, 8, 0, 3], 'metal-mass': [0, 2, 8, 10, 0, 4], 'metal-binar': [0, 2, 10, 12, 0, 5], 'age-ext': [2, 4, 4, 6, 1, 2], 'age-dist': [2, 4, 6, 8, 1, 3], 'age-mass': [2, 4, 8, 10, 1, 4], 'age-binar': [2, 4, 10, 12, 1, 5], 'ext-dist': [4, 6, 6, 8, 2, 3], 'ext-mass': [4, 6, 8, 10, 2, 4], 'ext-binar': [4, 6, 10, 12, 2, 5], 'dist-mass': [6, 8, 8, 10, 3, 4], 'dist-binar': [6, 8, 10, 12, 3, 5], 'mass-binar': [8, 10, 10, 12, 4, 5] } labels = ['$z$', '$log(age)$', '$E_{(B-V)}$', '$(m-M)_o$', '$M\,(M_{{\odot}})$', '$b_{frac}$'] gs_x1, gs_x2, gs_y1, gs_y2, mx, my = plot_dict[_2_params] x_label, y_label = labels[mx], labels[my] ax = plt.subplot(gs[gs_y1:gs_y2, gs_x1:gs_x2]) # To specify the number of ticks on both or any single axes ax.locator_params(nbins=5) if gs_x1 == 0: plt.ylabel(y_label, fontsize=11) plt.yticks(rotation=45) else: ax.tick_params(labelleft=False) if gs_y2 == 12: plt.xlabel(x_label, fontsize=11) plt.xticks(rotation=45) else: ax.tick_params(labelbottom=False) plt.minorticks_on() if mx in varIdxs and my in varIdxs: mx_model, my_model = varIdxs.index(mx), varIdxs.index(my) ax.set_title(r"$\rho={:.2f}$".format(np.corrcoef( [params_trace[mx_model], params_trace[my_model]])[0][1]), fontsize=11) hist2d(ax, params_trace[mx_model], params_trace[my_model]) mean_pos, width, height, theta = SigmaEllipse(np.array([ params_trace[mx_model], params_trace[my_model]]).T) # Plot 95% confidence ellipse. 
plt.scatter( mean_pos[0], mean_pos[1], marker='x', c='b', s=30, linewidth=2, zorder=4) ellipse = Ellipse(xy=mean_pos, width=width, height=height, angle=theta, edgecolor='r', fc='None', lw=.7, zorder=4) ax.add_patch(ellipse) xp_min, xp_max, yp_min, yp_max = min_max_p2 ax.set_xlim([xp_min, xp_max]) ax.set_ylim([yp_min, yp_max]) # Grid won't respect 'zorder': # https://github.com/matplotlib/matplotlib/issues/5045 # So we plot the grid behind everything else manually. xlocs, xlabels = plt.xticks() ylocs, ylabels = plt.yticks() for xt in xlocs: plt.axvline(x=xt, linestyle='-', color='w', zorder=-4) for yt in ylocs: plt.axhline(y=yt, linestyle='-', color='w', zorder=-4) def pl_param_pf( par_name, gs, min_max_p, varIdxs, mean_sol, map_sol, median_sol, mode_sol, param_r2, pardist_kde, model_done): ''' Parameter posterior plot. ''' plot_dict = { 'metal': [0, 2, 0, 2, 0], 'age': [2, 4, 2, 4, 1], 'ext': [4, 6, 4, 6, 2], 'dist': [6, 8, 6, 8, 3], 'mass': [8, 10, 8, 10, 4], 'binar': [10, 12, 10, 12, 5] } gs_x1, gs_x2, gs_y1, gs_y2, cp = plot_dict[par_name] labels = [r'$z$', r'$\log(age)$', r'$E_{{(B-V)}}$', r'$(m-M)_o$', r'$M\,(M_{{\odot}})$', r'$b_{{frac}}$'] frm = ["{:.4f}", "{:.3f}", "{:.3f}", "{:.3f}", "{:.0f}", "{:.2f}"] ld_p = labels[cp] p = frm[cp] ax = plt.subplot(gs[gs_y1:gs_y2, gs_x1:gs_x2]) plt.title( ld_p + r"$\;[R^2\approx$" + "{:.2f}]".format(param_r2[cp]), fontsize=11) # Set x axis limit. xp_min, xp_max = min_max_p[cp] ax.set_xlim(xp_min, xp_max) ax.locator_params(nbins=5) # Set minor ticks ax.minorticks_on() if cp == 5: plt.xlabel(ld_p, fontsize=11) plt.xticks(rotation=45) # else: # ax.tick_params(labelbottom=False) ax.tick_params(axis='y', which='major', labelleft=False) if cp in varIdxs: c_model = varIdxs.index(cp) # Plot KDE. if pardist_kde[c_model]: x_kde, par_kde = pardist_kde[c_model] plt.plot(x_kde, par_kde / max(par_kde), color='k', lw=1.5) # Obtain the bin values and edges using numpy hist, bin_edges = np.histogram(model_done[c_model], bins='auto') if len(bin_edges) > 25: hist, bin_edges = np.histogram(model_done[c_model], bins=20) # Plot bars with the proper positioning, height, and width. plt.bar( (bin_edges[1:] + bin_edges[:-1]) * .5, hist / float(hist.max()), width=(bin_edges[1] - bin_edges[0]), color='grey', alpha=0.3) # Mean plt.axvline( x=mean_sol[cp], linestyle='--', color='blue', zorder=4, label=("Mean (" + p + ")").format(mean_sol[cp])) # MAP plt.axvline( x=map_sol[cp], linestyle='--', color='red', zorder=4, label=("MAP (" + p + ")").format(map_sol[cp])) # Median plt.axvline( x=median_sol[cp], linestyle=':', color='green', zorder=4, label=("Median (" + p + ")").format(median_sol[cp])) # Mode plt.axvline( x=mode_sol[cp], linestyle='--', color='cyan', zorder=4, label=("Mode (" + p + ")").format(mode_sol[cp])) # 16th and 84th percentiles (1 sigma) around median. ph = np.percentile(model_done[c_model], 84) pl = np.percentile(model_done[c_model], 16) plt.axvline( x=ph, linestyle=':', color='orange', lw=1.5, zorder=4, label=("16-84th perc\n(" + p + ", " + p + ")").format(pl, ph)) plt.axvline(x=pl, linestyle=':', color='orange', lw=1.5, zorder=4) cur_ylim = ax.get_ylim() ax.set_ylim([0, cur_ylim[1]]) plt.legend(fontsize='small') def plot(N, *args): """ Handle each plot separately. 
""" plt_map = { 0: [pl_2_param_dens, args[0] + ' density map'], 1: [pl_param_pf, args[0] + ' probability function'], } fxn = plt_map.get(N, None)[0] if fxn is None: raise ValueError(" ERROR: there is no plot {}.".format(N)) try: fxn(*args) except Exception: import traceback print(traceback.format_exc()) print(" WARNING: error when plotting {}".format(plt_map.get(N)[1]))
gpl-3.0
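The module above adapts corner.py's two-dimensional density panel for ASteCA's parameter plots. A minimal driver for the hist2d() helper defined there is sketched below with synthetic correlated samples; the import path for hist2d is an assumption (adjust it to wherever the module is importable from), and the ASteCA-specific pieces (SigmaEllipse, the gridspec layout, the sampler traces) are deliberately left out.

import numpy as np
import matplotlib.pyplot as plt
from cornerPlot import hist2d   # assumed import path for the module above

rng = np.random.RandomState(0)
cov = [[1.0, 0.7], [0.7, 1.0]]
x, y = rng.multivariate_normal([0.0, 0.0], cov, size=5000).T

fig, ax = plt.subplots()
# Draw the corner-style panel: scattered points, density shading and sigma contours.
hist2d(ax, x, y, bins=30, smooth=1.0, fill_contours=True)
ax.set_xlabel("param 1")
ax.set_ylabel("param 2")
fig.savefig("corner_panel.png", dpi=150)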
fassio/stopeight
setup-old.py
2
4507
#!/usr/bin/env python import os #distutils start __import__('python') from python import get_pybind_include, BuildExt _include_dirs=[ os.path.join('stopeight-clibs','cmake-git-version-tracking','better-example'), # Path to pybind11 headers get_pybind_include(), ] _qt5_include_dirs=_include_dirs _library_dirs=[] #distutils end from setuptools import setup, Extension, find_namespace_packages setup( use_scm_version=True, packages=find_namespace_packages(include=['stopeight.*']), entry_points={ 'setuptools.installation':['eggsecutable = stopeight.util.editor.dispatch:main_func',] }, setup_requires=[ 'pybind11>=2.5.0', 'setuptools_scm', ], zip_safe=False, #distutils start #pip start install_requires=[ 'numpy', 'matplotlib', 'PyQt5',# >5.11.0: install with pip, not easy_setup! ], #pip end ext_modules = [ Extension( 'stopeight.grapher', [os.path.join('stopeight-clibs','grapher-wrappers','IFPyGrapher.cpp')], include_dirs=_include_dirs + [ os.path.join('stopeight-clibs','include'), os.path.join('stopeight-clibs','grapher') ], library_dirs=_library_dirs, libraries=['stopeight-clibs-grapher', 'stopeight-clibs-matrix'], language='c++', optional=True ), Extension( 'stopeight.matrix', [os.path.join('stopeight-clibs','matrix-wrappers','IFPyMatrix.cpp')], include_dirs=_include_dirs + [ os.path.join('stopeight-clibs','include'), os.path.join('stopeight-clibs','matrix') ], library_dirs=_library_dirs, libraries=['stopeight-clibs-matrix'], language='c++', optional=True ), Extension( 'stopeight.analyzer', [os.path.join('stopeight-clibs','analyzer-wrappers','IFPyAnalyzer.cpp')], include_dirs=_include_dirs + [ os.path.join('stopeight-clibs','include'), os.path.join('stopeight-clibs','analyzer') ], library_dirs=_library_dirs, libraries=['stopeight-clibs-analyzer'], language='c++', optional=True ), Extension( 'stopeight.legacy', [os.path.join('stopeight-clibs','legacy-wrappers','interfacepython.cpp'), ], include_dirs=_qt5_include_dirs + [ os.path.join('stopeight-clibs','legacy/include'), os.path.join('stopeight-clibs','legacy-wrappers') ], library_dirs=_library_dirs, libraries=['stopeight-clibs-legacy','Qt5Core'],#Qt5Core needed for old wrapper language='c++', optional=True ), Extension( 'stopeight.getters', [os.path.join('stopeight-clibs','legacy-wrappers','IFPyGetters.cpp'), os.path.join('stopeight-clibs','legacy-wrappers','IFPyShared.cpp')], include_dirs=_qt5_include_dirs + [ os.path.join('stopeight-clibs','legacy/include'), os.path.join('stopeight-clibs','legacy-wrappers') ], library_dirs=_library_dirs, libraries=['stopeight-clibs-legacy','stopeight-clibs-matrix','Qt5Core'],#Qt5Core needed for old wrapper language='c++', optional=True ), Extension( 'stopeight.finders', [os.path.join('stopeight-clibs','legacy-wrappers','IFPyFinders.cpp'), os.path.join('stopeight-clibs','legacy-wrappers','IFPyShared.cpp')], include_dirs=_qt5_include_dirs + [ os.path.join('stopeight-clibs','legacy/include'), os.path.join('stopeight-clibs','legacy-wrappers') ], library_dirs=_library_dirs, libraries=['stopeight-clibs-legacy','stopeight-clibs-matrix','Qt5Core'],#Qt5Core needed for old wrapper language='c++', optional=True ), ], cmdclass={'build_ext': BuildExt}, #distutils end )
gpl-2.0
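The setup script above builds several pybind11/C++ extensions against the stopeight-clibs libraries, with Qt5Core linked in for the legacy wrappers. For comparison, here is a stripped-down sketch of the same pybind11 extension pattern; the module name and source path are hypothetical, and it assumes a pybind11 release recent enough to provide pybind11.setup_helpers rather than the project's own get_pybind_include/BuildExt helpers.

import os
from setuptools import setup
from pybind11.setup_helpers import Pybind11Extension, build_ext

ext_modules = [
    Pybind11Extension(
        "example_module",                              # hypothetical module name
        [os.path.join("src", "example_module.cpp")],   # hypothetical source file
    ),
]

setup(
    name="example-package",
    ext_modules=ext_modules,
    cmdclass={"build_ext": build_ext},
)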
zaxtax/scikit-learn
doc/sphinxext/gen_rst.py
106
40198
""" Example generation for the scikit learn Generate the rst files for the examples by iterating over the python example files. Files that generate images should start with 'plot' """ from __future__ import division, print_function from time import time import ast import os import re import shutil import traceback import glob import sys import gzip import posixpath import subprocess import warnings from sklearn.externals import six # Try Python 2 first, otherwise load from Python 3 try: from StringIO import StringIO import cPickle as pickle import urllib2 as urllib from urllib2 import HTTPError, URLError except ImportError: from io import StringIO import pickle import urllib.request import urllib.error import urllib.parse from urllib.error import HTTPError, URLError try: # Python 2 built-in execfile except NameError: def execfile(filename, global_vars=None, local_vars=None): with open(filename, encoding='utf-8') as f: code = compile(f.read(), filename, 'exec') exec(code, global_vars, local_vars) try: basestring except NameError: basestring = str import token import tokenize import numpy as np try: # make sure that the Agg backend is set before importing any # matplotlib import matplotlib matplotlib.use('Agg') except ImportError: # this script can be imported by nosetest to find tests to run: we should not # impose the matplotlib requirement in that case. pass from sklearn.externals import joblib ############################################################################### # A tee object to redict streams to multiple outputs class Tee(object): def __init__(self, file1, file2): self.file1 = file1 self.file2 = file2 def write(self, data): self.file1.write(data) self.file2.write(data) def flush(self): self.file1.flush() self.file2.flush() ############################################################################### # Documentation link resolver objects def _get_data(url): """Helper function to get data over http or from a local file""" if url.startswith('http://'): # Try Python 2, use Python 3 on exception try: resp = urllib.urlopen(url) encoding = resp.headers.dict.get('content-encoding', 'plain') except AttributeError: resp = urllib.request.urlopen(url) encoding = resp.headers.get('content-encoding', 'plain') data = resp.read() if encoding == 'plain': pass elif encoding == 'gzip': data = StringIO(data) data = gzip.GzipFile(fileobj=data).read() else: raise RuntimeError('unknown encoding') else: with open(url, 'r') as fid: data = fid.read() fid.close() return data mem = joblib.Memory(cachedir='_build') get_data = mem.cache(_get_data) def parse_sphinx_searchindex(searchindex): """Parse a Sphinx search index Parameters ---------- searchindex : str The Sphinx search index (contents of searchindex.js) Returns ------- filenames : list of str The file names parsed from the search index. objects : dict The objects parsed from the search index. 
""" def _select_block(str_in, start_tag, end_tag): """Select first block delimited by start_tag and end_tag""" start_pos = str_in.find(start_tag) if start_pos < 0: raise ValueError('start_tag not found') depth = 0 for pos in range(start_pos, len(str_in)): if str_in[pos] == start_tag: depth += 1 elif str_in[pos] == end_tag: depth -= 1 if depth == 0: break sel = str_in[start_pos + 1:pos] return sel def _parse_dict_recursive(dict_str): """Parse a dictionary from the search index""" dict_out = dict() pos_last = 0 pos = dict_str.find(':') while pos >= 0: key = dict_str[pos_last:pos] if dict_str[pos + 1] == '[': # value is a list pos_tmp = dict_str.find(']', pos + 1) if pos_tmp < 0: raise RuntimeError('error when parsing dict') value = dict_str[pos + 2: pos_tmp].split(',') # try to convert elements to int for i in range(len(value)): try: value[i] = int(value[i]) except ValueError: pass elif dict_str[pos + 1] == '{': # value is another dictionary subdict_str = _select_block(dict_str[pos:], '{', '}') value = _parse_dict_recursive(subdict_str) pos_tmp = pos + len(subdict_str) else: raise ValueError('error when parsing dict: unknown elem') key = key.strip('"') if len(key) > 0: dict_out[key] = value pos_last = dict_str.find(',', pos_tmp) if pos_last < 0: break pos_last += 1 pos = dict_str.find(':', pos_last) return dict_out # Make sure searchindex uses UTF-8 encoding if hasattr(searchindex, 'decode'): searchindex = searchindex.decode('UTF-8') # parse objects query = 'objects:' pos = searchindex.find(query) if pos < 0: raise ValueError('"objects:" not found in search index') sel = _select_block(searchindex[pos:], '{', '}') objects = _parse_dict_recursive(sel) # parse filenames query = 'filenames:' pos = searchindex.find(query) if pos < 0: raise ValueError('"filenames:" not found in search index') filenames = searchindex[pos + len(query) + 1:] filenames = filenames[:filenames.find(']')] filenames = [f.strip('"') for f in filenames.split(',')] return filenames, objects class SphinxDocLinkResolver(object): """ Resolve documentation links using searchindex.js generated by Sphinx Parameters ---------- doc_url : str The base URL of the project website. searchindex : str Filename of searchindex, relative to doc_url. extra_modules_test : list of str List of extra module names to test. relative : bool Return relative links (only useful for links to documentation of this package). """ def __init__(self, doc_url, searchindex='searchindex.js', extra_modules_test=None, relative=False): self.doc_url = doc_url self.relative = relative self._link_cache = {} self.extra_modules_test = extra_modules_test self._page_cache = {} if doc_url.startswith('http://'): if relative: raise ValueError('Relative links are only supported for local ' 'URLs (doc_url cannot start with "http://)"') searchindex_url = doc_url + '/' + searchindex else: searchindex_url = os.path.join(doc_url, searchindex) # detect if we are using relative links on a Windows system if os.name.lower() == 'nt' and not doc_url.startswith('http://'): if not relative: raise ValueError('You have to use relative=True for the local' ' package on a Windows system.') self._is_windows = True else: self._is_windows = False # download and initialize the search index sindex = get_data(searchindex_url) filenames, objects = parse_sphinx_searchindex(sindex) self._searchindex = dict(filenames=filenames, objects=objects) def _get_link(self, cobj): """Get a valid link, False if not found""" fname_idx = None full_name = cobj['module_short'] + '.' 
+ cobj['name'] if full_name in self._searchindex['objects']: value = self._searchindex['objects'][full_name] if isinstance(value, dict): value = value[next(iter(value.keys()))] fname_idx = value[0] elif cobj['module_short'] in self._searchindex['objects']: value = self._searchindex['objects'][cobj['module_short']] if cobj['name'] in value.keys(): fname_idx = value[cobj['name']][0] if fname_idx is not None: fname = self._searchindex['filenames'][fname_idx] + '.html' if self._is_windows: fname = fname.replace('/', '\\') link = os.path.join(self.doc_url, fname) else: link = posixpath.join(self.doc_url, fname) if hasattr(link, 'decode'): link = link.decode('utf-8', 'replace') if link in self._page_cache: html = self._page_cache[link] else: html = get_data(link) self._page_cache[link] = html # test if cobj appears in page comb_names = [cobj['module_short'] + '.' + cobj['name']] if self.extra_modules_test is not None: for mod in self.extra_modules_test: comb_names.append(mod + '.' + cobj['name']) url = False if hasattr(html, 'decode'): # Decode bytes under Python 3 html = html.decode('utf-8', 'replace') for comb_name in comb_names: if hasattr(comb_name, 'decode'): # Decode bytes under Python 3 comb_name = comb_name.decode('utf-8', 'replace') if comb_name in html: url = link + u'#' + comb_name link = url else: link = False return link def resolve(self, cobj, this_url): """Resolve the link to the documentation, returns None if not found Parameters ---------- cobj : dict Dict with information about the "code object" for which we are resolving a link. cobi['name'] : function or class name (str) cobj['module_short'] : shortened module name (str) cobj['module'] : module name (str) this_url: str URL of the current page. Needed to construct relative URLs (only used if relative=True in constructor). Returns ------- link : str | None The link (URL) to the documentation. """ full_name = cobj['module_short'] + '.' + cobj['name'] link = self._link_cache.get(full_name, None) if link is None: # we don't have it cached link = self._get_link(cobj) # cache it for the future self._link_cache[full_name] = link if link is False or link is None: # failed to resolve return None if self.relative: link = os.path.relpath(link, start=this_url) if self._is_windows: # replace '\' with '/' so it on the web link = link.replace('\\', '/') # for some reason, the relative link goes one directory too high up link = link[3:] return link ############################################################################### rst_template = """ .. _example_%(short_fname)s: %(docstring)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- """ plot_rst_template = """ .. _example_%(short_fname)s: %(docstring)s %(image_list)s %(stdout)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- **Total running time of the example:** %(time_elapsed) .2f seconds (%(time_m) .0f minutes %(time_s) .2f seconds) """ # The following strings are used when we have several pictures: we use # an html div tag that our CSS uses to turn the lists into horizontal # lists. HLIST_HEADER = """ .. rst-class:: horizontal """ HLIST_IMAGE_TEMPLATE = """ * .. image:: images/%s :scale: 47 """ SINGLE_IMAGE = """ .. image:: images/%s :align: center """ # The following dictionary contains the information used to create the # thumbnails for the front page of the scikit-learn home page. 
# key: first image in set # values: (number of plot in set, height of thumbnail) carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600), 'plot_outlier_detection_001.png': (3, 372), 'plot_gp_regression_001.png': (2, 250), 'plot_adaboost_twoclass_001.png': (1, 372), 'plot_compare_methods_001.png': (1, 349)} def extract_docstring(filename, ignore_heading=False): """ Extract a module-level docstring, if any """ if six.PY2: lines = open(filename).readlines() else: lines = open(filename, encoding='utf-8').readlines() start_row = 0 if lines[0].startswith('#!'): lines.pop(0) start_row = 1 docstring = '' first_par = '' line_iterator = iter(lines) tokens = tokenize.generate_tokens(lambda: next(line_iterator)) for tok_type, tok_content, _, (erow, _), _ in tokens: tok_type = token.tok_name[tok_type] if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'): continue elif tok_type == 'STRING': docstring = eval(tok_content) # If the docstring is formatted with several paragraphs, extract # the first one: paragraphs = '\n'.join( line.rstrip() for line in docstring.split('\n')).split('\n\n') if paragraphs: if ignore_heading: if len(paragraphs) > 1: first_par = re.sub('\n', ' ', paragraphs[1]) first_par = ((first_par[:95] + '...') if len(first_par) > 95 else first_par) else: raise ValueError("Docstring not found by gallery.\n" "Please check the layout of your" " example file:\n {}\n and make sure" " it's correct".format(filename)) else: first_par = paragraphs[0] break return docstring, first_par, erow + 1 + start_row def generate_example_rst(app): """ Generate the list of examples, as well as the contents of examples. """ root_dir = os.path.join(app.builder.srcdir, 'auto_examples') example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..', 'examples')) generated_dir = os.path.abspath(os.path.join(app.builder.srcdir, 'modules', 'generated')) try: plot_gallery = eval(app.builder.config.plot_gallery) except TypeError: plot_gallery = bool(app.builder.config.plot_gallery) if not os.path.exists(example_dir): os.makedirs(example_dir) if not os.path.exists(root_dir): os.makedirs(root_dir) if not os.path.exists(generated_dir): os.makedirs(generated_dir) # we create an index.rst with all examples fhindex = open(os.path.join(root_dir, 'index.rst'), 'w') # Note: The sidebar button has been removed from the examples page for now # due to how it messes up the layout. Will be fixed at a later point fhindex.write("""\ .. raw:: html <style type="text/css"> div#sidebarbutton { /* hide the sidebar collapser, while ensuring vertical arrangement */ display: none; } </style> .. _examples-index: Examples ======== """) # Here we don't use an os.walk, but we recurse only twice: flat is # better than nested. 
seen_backrefs = set() generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs) for directory in sorted(os.listdir(example_dir)): if os.path.isdir(os.path.join(example_dir, directory)): generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs) fhindex.flush() def extract_line_count(filename, target_dir): # Extract the line count of a file example_file = os.path.join(target_dir, filename) if six.PY2: lines = open(example_file).readlines() else: lines = open(example_file, encoding='utf-8').readlines() start_row = 0 if lines and lines[0].startswith('#!'): lines.pop(0) start_row = 1 line_iterator = iter(lines) tokens = tokenize.generate_tokens(lambda: next(line_iterator)) check_docstring = True erow_docstring = 0 for tok_type, _, _, (erow, _), _ in tokens: tok_type = token.tok_name[tok_type] if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'): continue elif (tok_type == 'STRING') and check_docstring: erow_docstring = erow check_docstring = False return erow_docstring+1+start_row, erow+1+start_row def line_count_sort(file_list, target_dir): # Sort the list of examples by line-count new_list = [x for x in file_list if x.endswith('.py')] unsorted = np.zeros(shape=(len(new_list), 2)) unsorted = unsorted.astype(np.object) for count, exmpl in enumerate(new_list): docstr_lines, total_lines = extract_line_count(exmpl, target_dir) unsorted[count][1] = total_lines - docstr_lines unsorted[count][0] = exmpl index = np.lexsort((unsorted[:, 0].astype(np.str), unsorted[:, 1].astype(np.float))) if not len(unsorted): return [] return np.array(unsorted[index][:, 0]).tolist() def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False): """Generates RST to place a thumbnail in a gallery""" thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png') link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_') ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_') if ref_name.startswith('._'): ref_name = ref_name[2:] out = [] out.append(""" .. raw:: html <div class="thumbnailContainer" tooltip="{}"> """.format(snippet)) out.append('.. only:: html\n\n') out.append(' .. figure:: %s\n' % thumb) if link_name.startswith('._'): link_name = link_name[2:] if full_dir != '.': out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3])) else: out.append(' :target: ./%s.html\n\n' % link_name[:-3]) out.append(""" :ref:`example_%s` .. raw:: html </div> """ % (ref_name)) if is_backref: out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name) return ''.join(out) def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs): """ Generate the rst file for an example directory. 
""" if not directory == '.': target_dir = os.path.join(root_dir, directory) src_dir = os.path.join(example_dir, directory) else: target_dir = root_dir src_dir = example_dir if not os.path.exists(os.path.join(src_dir, 'README.txt')): raise ValueError('Example directory %s does not have a README.txt' % src_dir) fhindex.write(""" %s """ % open(os.path.join(src_dir, 'README.txt')).read()) if not os.path.exists(target_dir): os.makedirs(target_dir) sorted_listdir = line_count_sort(os.listdir(src_dir), src_dir) if not os.path.exists(os.path.join(directory, 'images', 'thumb')): os.makedirs(os.path.join(directory, 'images', 'thumb')) for fname in sorted_listdir: if fname.endswith('py'): backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery) new_fname = os.path.join(src_dir, fname) _, snippet, _ = extract_docstring(new_fname, True) fhindex.write(_thumbnail_div(directory, directory, fname, snippet)) fhindex.write(""" .. toctree:: :hidden: %s/%s """ % (directory, fname[:-3])) for backref in backrefs: include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref) seen = backref in seen_backrefs with open(include_path, 'a' if seen else 'w') as ex_file: if not seen: # heading print(file=ex_file) print('Examples using ``%s``' % backref, file=ex_file) print('-----------------%s--' % ('-' * len(backref)), file=ex_file) print(file=ex_file) rel_dir = os.path.join('../../auto_examples', directory) ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True)) seen_backrefs.add(backref) fhindex.write(""" .. raw:: html <div class="clearer"></div> """) # clear at the end of the section # modules for which we embed links into example code DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy'] def make_thumbnail(in_fname, out_fname, width, height): """Make a thumbnail with the same aspect ratio centered in an image with a given width and height """ # local import to avoid testing dependency on PIL: try: from PIL import Image except ImportError: import Image img = Image.open(in_fname) width_in, height_in = img.size scale_w = width / float(width_in) scale_h = height / float(height_in) if height_in * scale_w <= height: scale = scale_w else: scale = scale_h width_sc = int(round(scale * width_in)) height_sc = int(round(scale * height_in)) # resize the image img.thumbnail((width_sc, height_sc), Image.ANTIALIAS) # insert centered thumb = Image.new('RGB', (width, height), (255, 255, 255)) pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2) thumb.paste(img, pos_insert) thumb.save(out_fname) # Use optipng to perform lossless compression on the resized image if # software is installed if os.environ.get('SKLEARN_DOC_OPTIPNG', False): try: subprocess.call(["optipng", "-quiet", "-o", "9", out_fname]) except Exception: warnings.warn('Install optipng to reduce the size of the generated images') def get_short_module_name(module_name, obj_name): """ Get the shortest possible module name """ parts = module_name.split('.') short_name = module_name for i in range(len(parts) - 1, 0, -1): short_name = '.'.join(parts[:i]) try: exec('from %s import %s' % (short_name, obj_name)) except ImportError: # get the last working module name short_name = '.'.join(parts[:(i + 1)]) break return short_name class NameFinder(ast.NodeVisitor): """Finds the longest form of variable names and their imports in code Only retains names from imported modules. 
""" def __init__(self): super(NameFinder, self).__init__() self.imported_names = {} self.accessed_names = set() def visit_Import(self, node, prefix=''): for alias in node.names: local_name = alias.asname or alias.name self.imported_names[local_name] = prefix + alias.name def visit_ImportFrom(self, node): self.visit_Import(node, node.module + '.') def visit_Name(self, node): self.accessed_names.add(node.id) def visit_Attribute(self, node): attrs = [] while isinstance(node, ast.Attribute): attrs.append(node.attr) node = node.value if isinstance(node, ast.Name): # This is a.b, not e.g. a().b attrs.append(node.id) self.accessed_names.add('.'.join(reversed(attrs))) else: # need to get a in a().b self.visit(node) def get_mapping(self): for name in self.accessed_names: local_name = name.split('.', 1)[0] remainder = name[len(local_name):] if local_name in self.imported_names: # Join import path to relative path full_name = self.imported_names[local_name] + remainder yield name, full_name def identify_names(code): """Builds a codeobj summary by identifying and resovles used names >>> code = ''' ... from a.b import c ... import d as e ... print(c) ... e.HelloWorld().f.g ... ''' >>> for name, o in sorted(identify_names(code).items()): ... print(name, o['name'], o['module'], o['module_short']) c c a.b a.b e.HelloWorld HelloWorld d d """ finder = NameFinder() finder.visit(ast.parse(code)) example_code_obj = {} for name, full_name in finder.get_mapping(): # name is as written in file (e.g. np.asarray) # full_name includes resolved import path (e.g. numpy.asarray) module, attribute = full_name.rsplit('.', 1) # get shortened module name module_short = get_short_module_name(module, attribute) cobj = {'name': attribute, 'module': module, 'module_short': module_short} example_code_obj[name] = cobj return example_code_obj def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery): """ Generate the rst file for a given example. Returns the set of sklearn functions/classes imported in the example. """ base_image_name = os.path.splitext(fname)[0] image_fname = '%s_%%03d.png' % base_image_name this_template = rst_template last_dir = os.path.split(src_dir)[-1] # to avoid leading . in file names, and wrong names in links if last_dir == '.' or last_dir == 'examples': last_dir = '' else: last_dir += '_' short_fname = last_dir + fname src_file = os.path.join(src_dir, fname) example_file = os.path.join(target_dir, fname) shutil.copyfile(src_file, example_file) # The following is a list containing all the figure names figure_list = [] image_dir = os.path.join(target_dir, 'images') thumb_dir = os.path.join(image_dir, 'thumb') if not os.path.exists(image_dir): os.makedirs(image_dir) if not os.path.exists(thumb_dir): os.makedirs(thumb_dir) image_path = os.path.join(image_dir, image_fname) stdout_path = os.path.join(image_dir, 'stdout_%s.txt' % base_image_name) time_path = os.path.join(image_dir, 'time_%s.txt' % base_image_name) thumb_file = os.path.join(thumb_dir, base_image_name + '.png') time_elapsed = 0 if plot_gallery and fname.startswith('plot'): # generate the plot as png image if file name # starts with plot and if it is more recent than an # existing image. 
first_image_file = image_path % 1 if os.path.exists(stdout_path): stdout = open(stdout_path).read() else: stdout = '' if os.path.exists(time_path): time_elapsed = float(open(time_path).read()) if not os.path.exists(first_image_file) or \ os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime: # We need to execute the code print('plotting %s' % fname) t0 = time() import matplotlib.pyplot as plt plt.close('all') cwd = os.getcwd() try: # First CD in the original example dir, so that any file # created by the example get created in this directory orig_stdout = sys.stdout os.chdir(os.path.dirname(src_file)) my_buffer = StringIO() my_stdout = Tee(sys.stdout, my_buffer) sys.stdout = my_stdout my_globals = {'pl': plt} execfile(os.path.basename(src_file), my_globals) time_elapsed = time() - t0 sys.stdout = orig_stdout my_stdout = my_buffer.getvalue() if '__doc__' in my_globals: # The __doc__ is often printed in the example, we # don't with to echo it my_stdout = my_stdout.replace( my_globals['__doc__'], '') my_stdout = my_stdout.strip().expandtabs() if my_stdout: stdout = '**Script output**::\n\n %s\n\n' % ( '\n '.join(my_stdout.split('\n'))) open(stdout_path, 'w').write(stdout) open(time_path, 'w').write('%f' % time_elapsed) os.chdir(cwd) # In order to save every figure we have two solutions : # * iterate from 1 to infinity and call plt.fignum_exists(n) # (this requires the figures to be numbered # incrementally: 1, 2, 3 and not 1, 2, 5) # * iterate over [fig_mngr.num for fig_mngr in # matplotlib._pylab_helpers.Gcf.get_all_fig_managers()] fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers() for fig_mngr in fig_managers: # Set the fig_num figure as the current figure as we can't # save a figure that's not the current figure. fig = plt.figure(fig_mngr.num) kwargs = {} to_rgba = matplotlib.colors.colorConverter.to_rgba for attr in ['facecolor', 'edgecolor']: fig_attr = getattr(fig, 'get_' + attr)() default_attr = matplotlib.rcParams['figure.' + attr] if to_rgba(fig_attr) != to_rgba(default_attr): kwargs[attr] = fig_attr fig.savefig(image_path % fig_mngr.num, **kwargs) figure_list.append(image_fname % fig_mngr.num) except: print(80 * '_') print('%s is not compiling:' % fname) traceback.print_exc() print(80 * '_') finally: os.chdir(cwd) sys.stdout = orig_stdout print(" - time elapsed : %.2g sec" % time_elapsed) else: figure_list = [f[len(image_dir):] for f in glob.glob(image_path.replace("%03d", '[0-9][0-9][0-9]'))] figure_list.sort() # generate thumb file this_template = plot_rst_template car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/') # Note: normaly, make_thumbnail is used to write to the path contained in `thumb_file` # which is within `auto_examples/../images/thumbs` depending on the example. # Because the carousel has different dimensions than those of the examples gallery, # I did not simply reuse them all as some contained whitespace due to their default gallery # thumbnail size. Below, for a few cases, seperate thumbnails are created (the originals can't # just be overwritten with the carousel dimensions as it messes up the examples gallery layout). # The special carousel thumbnails are written directly to _build/html/stable/_images/, # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to # have it happen with the rest. 
Ideally the should be written to 'thumb_file' as well, and then # copied to the _images folder during the `Copying Downloadable Files` step like the rest. if not os.path.exists(car_thumb_path): os.makedirs(car_thumb_path) if os.path.exists(first_image_file): # We generate extra special thumbnails for the carousel carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png') first_img = image_fname % 1 if first_img in carousel_thumbs: make_thumbnail((image_path % carousel_thumbs[first_img][0]), carousel_tfile, carousel_thumbs[first_img][1], 190) make_thumbnail(first_image_file, thumb_file, 400, 280) if not os.path.exists(thumb_file): # create something to replace the thumbnail make_thumbnail('images/no_image.png', thumb_file, 200, 140) docstring, short_desc, end_row = extract_docstring(example_file) # Depending on whether we have one or more figures, we're using a # horizontal list or a single rst call to 'image'. if len(figure_list) == 1: figure_name = figure_list[0] image_list = SINGLE_IMAGE % figure_name.lstrip('/') else: image_list = HLIST_HEADER for figure_name in figure_list: image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') time_m, time_s = divmod(time_elapsed, 60) f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w') f.write(this_template % locals()) f.flush() # save variables so we can later add links to the documentation if six.PY2: example_code_obj = identify_names(open(example_file).read()) else: example_code_obj = \ identify_names(open(example_file, encoding='utf-8').read()) if example_code_obj: codeobj_fname = example_file[:-3] + '_codeobj.pickle' with open(codeobj_fname, 'wb') as fid: pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL) backrefs = set('{module_short}.{name}'.format(**entry) for entry in example_code_obj.values() if entry['module'].startswith('sklearn')) return backrefs def embed_code_links(app, exception): """Embed hyperlinks to documentation into example code""" if exception is not None: return print('Embedding documentation hyperlinks in examples..') if app.builder.name == 'latex': # Don't embed hyperlinks when a latex builder is used. 
return # Add resolvers for the packages for which we want to show links doc_resolvers = {} doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir, relative=True) resolver_urls = { 'matplotlib': 'http://matplotlib.org', 'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0', 'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference', } for this_module, url in resolver_urls.items(): try: doc_resolvers[this_module] = SphinxDocLinkResolver(url) except HTTPError as e: print("The following HTTP Error has occurred:\n") print(e.code) except URLError as e: print("\n...\n" "Warning: Embedding the documentation hyperlinks requires " "internet access.\nPlease check your network connection.\n" "Unable to continue embedding `{0}` links due to a URL " "Error:\n".format(this_module)) print(e.args) example_dir = os.path.join(app.builder.srcdir, 'auto_examples') html_example_dir = os.path.abspath(os.path.join(app.builder.outdir, 'auto_examples')) # patterns for replacement link_pattern = '<a href="%s">%s</a>' orig_pattern = '<span class="n">%s</span>' period = '<span class="o">.</span>' for dirpath, _, filenames in os.walk(html_example_dir): for fname in filenames: print('\tprocessing: %s' % fname) full_fname = os.path.join(html_example_dir, dirpath, fname) subpath = dirpath[len(html_example_dir) + 1:] pickle_fname = os.path.join(example_dir, subpath, fname[:-5] + '_codeobj.pickle') if os.path.exists(pickle_fname): # we have a pickle file with the objects to embed links for with open(pickle_fname, 'rb') as fid: example_code_obj = pickle.load(fid) fid.close() str_repl = {} # generate replacement strings with the links for name, cobj in example_code_obj.items(): this_module = cobj['module'].split('.')[0] if this_module not in doc_resolvers: continue try: link = doc_resolvers[this_module].resolve(cobj, full_fname) except (HTTPError, URLError) as e: print("The following error has occurred:\n") print(repr(e)) continue if link is not None: parts = name.split('.') name_html = period.join(orig_pattern % part for part in parts) str_repl[name_html] = link_pattern % (link, name_html) # do the replacement in the html file # ensure greediness names = sorted(str_repl, key=len, reverse=True) expr = re.compile(r'(?<!\.)\b' + # don't follow . or word '|'.join(re.escape(name) for name in names)) def substitute_link(match): return str_repl[match.group()] if len(str_repl) > 0: with open(full_fname, 'rb') as fid: lines_in = fid.readlines() with open(full_fname, 'wb') as fid: for line in lines_in: line = line.decode('utf-8') line = expr.sub(substitute_link, line) fid.write(line.encode('utf-8')) print('[done]') def setup(app): app.connect('builder-inited', generate_example_rst) app.add_config_value('plot_gallery', True, 'html') # embed links after build is finished app.connect('build-finished', embed_code_links) # Sphinx hack: sphinx copies generated images to the build directory # each time the docs are made. If the desired image name already # exists, it appends a digit to prevent overwrites. The problem is, # the directory is never cleared. This means that each time you build # the docs, the number of images in the directory grows. # # This question has been asked on the sphinx development list, but there # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html # # The following is a hack that prevents this behavior by clearing the # image build directory each time the docs are built. If sphinx # changes their layout between versions, this will not work (though # it should probably not cause a crash). 
Tested successfully # on Sphinx 1.0.7 build_image_dir = '_build/html/_images' if os.path.exists(build_image_dir): filelist = os.listdir(build_image_dir) for filename in filelist: if filename.endswith('png'): os.remove(os.path.join(build_image_dir, filename)) def setup_module(): # HACK: Stop nosetests running setup() above pass
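# A minimal sketch (not part of the original gen_rst.py) of the arithmetic that
# make_thumbnail performs above: scale the source image so it fits the target
# box, then paste it centred.  The helper name and the example sizes below are
# hypothetical, chosen only to illustrate the scale/offset computation.

def _thumbnail_geometry(width_in, height_in, width, height):
    """Return (scaled_w, scaled_h, paste_x, paste_y) for a centred thumbnail."""
    scale_w = width / float(width_in)
    scale_h = height / float(height_in)
    # prefer the width-based scale unless it would overflow the target height
    scale = scale_w if height_in * scale_w <= height else scale_h
    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))
    return (width_sc, height_sc,
            (width - width_sc) // 2, (height - height_sc) // 2)

# e.g. an 800x600 figure shrunk into the 400x280 gallery box is limited by
# height and centred horizontally:
assert _thumbnail_geometry(800, 600, 400, 280) == (373, 280, 13, 0)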
bsd-3-clause
musically-ut/statsmodels
examples/python/kernel_density.py
33
1805
## Kernel Density Estimation import numpy as np from scipy import stats import statsmodels.api as sm import matplotlib.pyplot as plt from statsmodels.distributions.mixture_rvs import mixture_rvs ##### A univariate example. np.random.seed(12345) obs_dist1 = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.norm], kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5))) kde = sm.nonparametric.KDEUnivariate(obs_dist1) kde.fit() fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.hist(obs_dist1, bins=50, normed=True, color='red') ax.plot(kde.support, kde.density, lw=2, color='black'); obs_dist2 = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta], kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5)))) kde2 = sm.nonparametric.KDEUnivariate(obs_dist2) kde2.fit() fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.hist(obs_dist2, bins=50, normed=True, color='red') ax.plot(kde2.support, kde2.density, lw=2, color='black'); # The fitted KDE object is a full non-parametric distribution. obs_dist3 = mixture_rvs([.25,.75], size=1000, dist=[stats.norm, stats.norm], kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5))) kde3 = sm.nonparametric.KDEUnivariate(obs_dist3) kde3.fit() kde3.entropy kde3.evaluate(-1) ##### CDF fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(kde3.support, kde3.cdf); ##### Cumulative Hazard Function fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(kde3.support, kde3.cumhazard); ##### Inverse CDF fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(kde3.support, kde3.icdf); ##### Survival Function fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(kde3.support, kde3.sf);
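# A minimal hand-rolled sketch (not part of the original example) of what the
# Gaussian KDE above is doing: the density estimate at a point is the average
# of normal kernels centred on the observations.  The helper name and the 0.3
# bandwidth are arbitrary illustrative choices; `np` and `obs_dist1` are
# reused from the code above.

def gaussian_kde_at(x0, samples, bandwidth=0.3):
    z = (x0 - samples) / bandwidth
    return np.mean(np.exp(-0.5 * z ** 2) / (bandwidth * np.sqrt(2 * np.pi)))

# should be of the same order as kde.evaluate(0.0) for a comparable bandwidth
print(gaussian_kde_at(0.0, obs_dist1))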
bsd-3-clause
YosefLab/scVI
docs/conf.py
1
8412
#!/usr/bin/env python # -*- coding: utf-8 -*- # # scvi documentation build configuration file, created by # sphinx-quickstart on Fri Jun 9 13:47:02 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory is # relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # import sys from pathlib import Path HERE = Path(__file__).parent sys.path[:0] = [str(HERE.parent), str(HERE / "extensions")] import scvi # noqa # -- General configuration --------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = "3.0" # Nicer param docs # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.viewcode", "nbsphinx", "nbsphinx_link", "sphinx.ext.mathjax", "sphinx.ext.napoleon", "sphinx_autodoc_typehints", # needs to be after napoleon "sphinx.ext.intersphinx", "sphinx.ext.autosummary", "scanpydoc.elegant_typehints", "scanpydoc.definition_list_typed_field", "scanpydoc.autosummary_generate_imported", *[p.stem for p in (HERE / "extensions").glob("*.py")], ] # nbsphinx specific settings exclude_patterns = ["_build", "**.ipynb_checkpoints"] nbsphinx_execute = "never" # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # Generate the API documentation when building autosummary_generate = True autodoc_member_order = "bysource" napoleon_google_docstring = False napoleon_numpy_docstring = True napoleon_include_init_with_doc = False napoleon_use_rtype = True # having a separate entry generally helps readability napoleon_use_param = True napoleon_custom_sections = [("Params", "Parameters")] todo_include_todos = False numpydoc_show_class_members = False annotate_defaults = True # scanpydoc option, look into why we need this nbsphinx_prolog = r""" .. raw:: html <style> .nbinput .prompt, .nboutput .prompt { display: none; } p { padding-top: 5px; } .nboutput .stderr{ display: none; } </style> {% set docname = env.doc2path(env.docname, base=None).split("/")[-1] %} .. raw:: html <div class="admonition note"> <p class="admonition-title">Note</p> <p> This page was generated from <a class="reference external" href="https://github.com/yoseflab/scvi-tutorials/">{{ docname|e }}</a>. Interactive online version: <span style="white-space: nowrap;"><a href="https://colab.research.google.com/github/yoseflab/scvi_tutorials/blob/master/{{ docname|e }}"><img alt="Colab badge" src="https://colab.research.google.com/assets/colab-badge.svg" style="vertical-align:text-bottom"></a>.</span> </p> </div> """ # The master toctree document. 
master_doc = "index" intersphinx_mapping = dict( anndata=("https://anndata.readthedocs.io/en/stable/", None), ipython=("https://ipython.readthedocs.io/en/stable/", None), matplotlib=("https://matplotlib.org/", None), numpy=("https://docs.scipy.org/doc/numpy/", None), pandas=("https://pandas.pydata.org/pandas-docs/stable/", None), python=("https://docs.python.org/3", None), scipy=("https://docs.scipy.org/doc/scipy/reference/", None), sklearn=("https://scikit-learn.org/stable/", None), torch=("https://pytorch.org/docs/master/", None), scanpy=("https://scanpy.readthedocs.io/en/stable/", None), ) qualname_overrides = { "scvi.data.dataset.GeneExpressionDataset": "scvi.data.GeneExpressionDataset" } # General information about the project. project = u"scvi" copyright = u"2020, Yosef Lab, UC Berkeley" author = u"Romain Lopez, Adam Gayoso, Pierre Boyeau, Galen Xing" # The version info for the project you're documenting, acts as replacement # for |version| and |release|, also used in various other places throughout # the built documents. # # The short X.Y version. version = scvi.__version__ # The full version, including alpha/beta/rc tags. release = scvi.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "pydata_sphinx_theme" # Theme options are theme-specific and customize the look and feel of a # theme further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} html_logo = "_static/logo.png" html_theme_options = { "github_url": "https://github.com/YosefLab/scvi-tools", "twitter_url": "https://twitter.com/YosefLab", # "use_edit_page_button": True, } html_context = dict( # display_github=True, # Integrate GitHub github_user="YosefLab", # Username github_repo="scvi-tools", # Repo name github_version="master", # Version doc_path="docs/", # Path in the checkout to the docs root ) # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] html_css_files = ["css/user_guide.css", "css/custom.css"] html_show_sphinx = False # -- Options for HTMLHelp output --------------------------------------- # Output file base name for HTML help builder. 
htmlhelp_basename = "scvidoc" mathjax_config = { "extensions": ["tex2jax.js"], "jax": ["input/TeX", "output/HTML-CSS"], "tex2jax": { "inlineMath": [["$", "$"], ["\\(", "\\)"]], "displayMath": [["$$", "$$"], ["\\[", "\\]"]], "processEscapes": True, }, } # -- Options for LaTeX output ------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto, manual, or own class]). latex_documents = [ (master_doc, "scvi.tex", u"scvi Documentation", u"Romain Lopez", "manual") ] # -- Options for manual page output ------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "scvi", u"scVI Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "scvi", u"scvi Documentation", author, "scvi", "One line description of project.", "Miscellaneous", ) ]
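# A small self-contained sketch (not part of the original conf.py) of the
# pathlib idiom used in the `extensions` list above, where every
# docs/extensions/*.py file is registered by its module stem.  The directory
# layout and file names below are hypothetical.

from pathlib import Path
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    ext_dir = Path(tmp) / "extensions"
    ext_dir.mkdir()
    (ext_dir / "edit_on_github.py").touch()
    (ext_dir / "typed_returns.py").touch()
    local_extensions = sorted(p.stem for p in ext_dir.glob("*.py"))
    assert local_extensions == ["edit_on_github", "typed_returns"]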
bsd-3-clause
joshbohde/scikit-learn
doc/sphinxext/numpy_ext/docscrape_sphinx.py
22
7924
import re, inspect, textwrap, pydoc import sphinx from docscrape import NumpyDocString, FunctionDoc, ClassDoc class SphinxDocString(NumpyDocString): def __init__(self, docstring, config=None): config = {} if config is None else config self.use_plots = config.get('use_plots', False) NumpyDocString.__init__(self, docstring, config=config) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param,param_type,desc in self[name]: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) out += [''] out += self._str_indent(desc,8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() if not self._obj or hasattr(self._obj, param): autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: # GAEL: Toctree commented out below because it creates # hundreds of sphinx warnings # out += ['.. autosummary::', ' :toctree:', ''] out += ['.. autosummary::', ''] out += autosum if others: maxlen_0 = max([len(x[0]) for x in others]) maxlen_1 = max([len(x[1]) for x in others]) hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) n_indent = maxlen_0 + maxlen_1 + 4 out += [hdr] for param, param_type, desc in others: out += [fmt % (param.strip(), param_type)] out += self._str_indent(desc, n_indent) out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. 
index:: %s' % idx.get('default','')] for section, references in idx.iteritems(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": out += ['.. only:: latex',''] else: out += ['.. latexonly::',''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() for param_list in ('Parameters', 'Returns', 'Raises'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Attributes', 'Methods'): out += self._str_member_list(param_list) out = self._str_indent(out,indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.use_plots = config.get('use_plots', False) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.use_plots = config.get('use_plots', False) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config=None): self._f = obj SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif callable(obj): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config)
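# A stripped-down sketch (not part of the original module) of the fixed-width
# table that _str_member_list builds for members it cannot place in an
# autosummary: two columns padded to their longest entry plus a 10-character
# rule, delimited by `===` header lines as reST simple tables require.  The
# rows below are made up, and the description is appended inline rather than
# indented as the real implementation does.

rows = [("alpha", "float", "Step size."),
        ("n_iter", "int", "Number of passes over the data.")]
maxlen_0 = max(len(r[0]) for r in rows)
maxlen_1 = max(len(r[1]) for r in rows)
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
table = [hdr] + [fmt % (name, typ) + desc for name, typ, desc in rows] + [hdr]
print("\n".join(table))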
bsd-3-clause
mbkumar/pymatgen
pymatgen/analysis/diffraction/tests/test_tem.py
2
11775
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Unit tests for TEM calculator. """ import unittest from pymatgen.core.lattice import Lattice from pymatgen.core.structure import Structure from pymatgen.analysis.diffraction.tem import TEMCalculator from pymatgen.util.testing import PymatgenTest import numpy as np import pandas as pd import plotly.graph_objs as go __author__ = "Frank Wan, Jason Liang" __copyright__ = "Copyright 2019, The Materials Project" __version__ = "0.201" __maintainer__ = "Jason Liang" __email__ = "[email protected], [email protected]" __date__ = "2/20/20" class TEMCalculatorTest(PymatgenTest): def test_wavelength_rel(self): # Tests that the relativistic wavelength formula (for 200kv electron beam) is correct c = TEMCalculator() self.assertAlmostEqual(c.wavelength_rel(), 0.0251, places=3) def test_generate_points(self): # Tests that 3d points are properly generated c = TEMCalculator() actual = c.generate_points(-1, 1) expected = np.array([[-1, -1, -1], [-1, -1, 0], [-1, -1, 1], [0, -1, -1], [0, -1, 0], [0, -1, 1], [1, -1, -1], [1, -1, 0], [1, -1, 1], [-1, 0, -1], [-1, 0, 0], [-1, 0, 1], [0, 0, -1], [0, 0, 0], [0, 0, 1], [1, 0, -1], [1, 0, 0], [1, 0, 1], [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], [0, 1, -1], [0, 1, 0], [0, 1, 1], [1, 1, -1], [1, 1, 0], [1, 1, 1]]) self.assertArrayEqual(expected, actual) def test_zone_axis_filter(self): # Tests that the appropriate Laue-Zoned points are returned c = TEMCalculator() empty_points = np.asarray([]) self.assertEqual(c.zone_axis_filter(empty_points), []) points = np.asarray([[-1, -1, -1]]) self.assertEqual(c.zone_axis_filter(points), []) laue_1 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, -1]]) self.assertEqual(c.zone_axis_filter(laue_1, 1), [(0, 0, 1)]) def test_get_interplanar_spacings(self): # Tests that the appropriate interplacing spacing is returned c = TEMCalculator() point = [(3, 9, 0)] latt = Lattice.cubic(4.209) cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) tet = self.get_structure("Li10GeP2S12") hexa = self.get_structure("Graphite") ortho = self.get_structure("K2O2") mono = self.get_structure("Li3V2(PO4)3") spacings_cubic = c.get_interplanar_spacings(cubic, point) spacings_tet = c.get_interplanar_spacings(tet, point) spacings_hexa = c.get_interplanar_spacings(hexa, point) spacings_ortho = c.get_interplanar_spacings(ortho, point) spacings_mono = c.get_interplanar_spacings(mono, point) for p in point: self.assertAlmostEqual(spacings_cubic[p], 0.4436675557216236) self.assertAlmostEqual(spacings_tet[p], 0.9164354445646701) self.assertAlmostEqual(spacings_hexa[p], 0.19775826179547752) self.assertAlmostEqual(spacings_ortho[p], 0.5072617738916) self.assertAlmostEqual(spacings_mono[p], 0.84450786041677972) def test_bragg_angles(self): # Tests that the appropriate bragg angle is returned. Testing formula with values of x-ray diffraction in # materials project. c = TEMCalculator() latt = Lattice.cubic(4.209) cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) point = [(1, 1, 0)] spacings = c.get_interplanar_spacings(cubic, point) bragg_angles_val = np.arcsin(1.5406 / (2 * spacings[point[0]])) self.assertAlmostEqual(bragg_angles_val, 0.262, places=3) def test_get_s2(self): # Tests that the appropriate s2 factor is returned. 
c = TEMCalculator() latt = Lattice.cubic(4.209) cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) point = [(-10, 3, 0)] spacings = c.get_interplanar_spacings(cubic, point) angles = c.bragg_angles(spacings) s2 = c.get_s2(angles) for p in s2: self.assertAlmostEqual(s2[p], 1.5381852947115047) def test_x_ray_factors(self): c = TEMCalculator() latt = Lattice.cubic(4.209) cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) point = [(-10, 3, 0)] spacings = c.get_interplanar_spacings(cubic, point) angles = c.bragg_angles(spacings) x_ray = c.x_ray_factors(cubic, angles) self.assertAlmostEqual(x_ray['Cs'][(-10, 3, 0)], 14.42250869579648) self.assertAlmostEqual(x_ray['Cl'][(-10, 3, 0)], 2.7804915737999103) def test_electron_scattering_factors(self): # Test the electron atomic scattering factor, values approximate with # international table of crystallography volume C. Rounding error when converting hkl to sin(theta)/lambda. # Error increases as sin(theta)/lambda is smaller. c = TEMCalculator() latt = Lattice.cubic(4.209) cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) nacl = Structure.from_spacegroup("Fm-3m", Lattice.cubic(5.692), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) point = [(2, 1, 3)] point_nacl = [(4, 2, 0)] spacings = c.get_interplanar_spacings(cubic, point) spacings_nacl = c.get_interplanar_spacings(nacl, point_nacl) angles = c.bragg_angles(spacings) angles_nacl = c.bragg_angles(spacings_nacl) elscatt = c.electron_scattering_factors(cubic, angles) elscatt_nacl = c.electron_scattering_factors(nacl, angles_nacl) self.assertAlmostEqual(elscatt['Cs'][(2, 1, 3)], 2.890, places=1) self.assertAlmostEqual(elscatt['Cl'][(2, 1, 3)], 1.138, places=1) self.assertAlmostEqual(elscatt_nacl['Na'][(4, 2, 0)], 0.852, places=1) self.assertAlmostEqual(elscatt_nacl['Cl'][(4, 2, 0)], 1.372, places=1) def test_cell_scattering_factors(self): # Test that fcc structure gives 0 intensity for mixed even, odd hkl. c = TEMCalculator() nacl = Structure.from_spacegroup("Fm-3m", Lattice.cubic(5.692), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) point = [(2, 1, 0)] spacings = c.get_interplanar_spacings(nacl, point) angles = c.bragg_angles(spacings) cellscatt = c.cell_scattering_factors(nacl, angles) self.assertAlmostEqual(cellscatt[(2, 1, 0)], 0) def test_cell_intensity(self): # Test that bcc structure gives lower intensity for h + k + l != even. c = TEMCalculator() latt = Lattice.cubic(4.209) cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) point = [(2, 1, 0)] point2 = [(2, 2, 0)] spacings = c.get_interplanar_spacings(cubic, point) spacings2 = c.get_interplanar_spacings(cubic, point2) angles = c.bragg_angles(spacings) angles2 = c.bragg_angles(spacings2) cellint = c.cell_intensity(cubic, angles) cellint2 = c.cell_intensity(cubic, angles2) self.assertGreater(cellint2[(2, 2, 0)], cellint[(2, 1, 0)]) def test_normalized_cell_intensity(self): # Test that the method correctly normalizes a value. 
c = TEMCalculator() latt = Lattice.cubic(4.209) cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) point = [(2, 0, 0)] spacings = c.get_interplanar_spacings(cubic, point) angles = c.bragg_angles(spacings) cellint = c.normalized_cell_intensity(cubic, angles) self.assertAlmostEqual(cellint[(2, 0, 0)], 1) def test_is_parallel(self): c = TEMCalculator() structure = self.get_structure("Si") self.assertTrue(c.is_parallel(structure, (1, 0, 0), (3, 0, 0))) self.assertFalse(c.is_parallel(structure, (1, 0, 0), (3, 0, 1))) def test_get_first_point(self): c = TEMCalculator() latt = Lattice.cubic(4.209) points = c.generate_points(-2, 2) cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) first_pt = c.get_first_point(cubic, points) self.assertTrue(4.209 in first_pt.values()) def test_interplanar_angle(self): # test interplanar angles. Reference values from KW Andrews, # Interpretation of Electron Diffraction pp70-90. c = TEMCalculator() latt = Lattice.cubic(4.209) cubic = Structure(latt, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]) phi = c.get_interplanar_angle(cubic, (0, 0, -1), (0, -1, 0)) self.assertAlmostEqual(90, phi, places=1) tet = self.get_structure("Li10GeP2S12") phi = c.get_interplanar_angle(tet, (0, 0, 1), (1, 0, 3)) self.assertAlmostEqual(25.796, phi, places=1) latt = Lattice.hexagonal(2, 4) hex = Structure(latt, ["Ab"], [[0, 0, 0]]) phi = c.get_interplanar_angle(hex, (0, 0, 1), (1, 0, 6)) self.assertAlmostEqual(21.052, phi, places=1) def test_get_plot_coeffs(self): # Test if x * p1 + y * p2 yields p3. c = TEMCalculator() coeffs = c.get_plot_coeffs((1, 1, 0), (1, -1, 0), (2, 0, 0)) self.assertArrayAlmostEqual(np.array([1., 1.]), coeffs) def test_get_positions(self): c = TEMCalculator() points = c.generate_points(-2, 2) structure = self.get_structure("Si") positions = c.get_positions(structure, points) self.assertArrayEqual([0, 0], positions[(0, 0, 0)]) # Test silicon diffraction data spot rough positions: # see https://www.doitpoms.ac.uk/tlplib/diffraction-patterns/printall.php self.assertArrayAlmostEqual([1, 0], positions[(-1, 0, 0)], 0) def test_TEM_dots(self): # All dependencies in TEM_dots method are tested. Only make sure each object created is # the class desired. c = TEMCalculator() points = c.generate_points(-2, 2) structure = self.get_structure("Si") dots = c.tem_dots(structure, points) self.assertTrue(all([isinstance(x, tuple) for x in dots])) def test_get_pattern(self): # All dependencies in get_pattern method are tested. # Only make sure result is a pd dataframe. c = TEMCalculator() structure = self.get_structure("Si") self.assertTrue(isinstance(c.get_pattern(structure), pd.DataFrame)) def test_get_plot_2d(self): c = TEMCalculator() structure = self.get_structure("Si") self.assertTrue(isinstance(c.get_plot_2d(structure), go.Figure)) def test_get_plot_2d_concise(self): c = TEMCalculator() structure = self.get_structure("Si") fig = c.get_plot_2d_concise(structure) width = fig.layout.width height = fig.layout.height self.assertTrue(width == 121 and height == 121) if __name__ == '__main__': unittest.main()
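# A quick numerical sketch (not part of the original test file) of where the
# 0.0251 Angstrom value in test_wavelength_rel comes from: the relativistic
# de Broglie wavelength for the 200 kV beam referenced in that test,
#   lambda = h / sqrt(2 * m0 * e * V * (1 + e * V / (2 * m0 * c**2))).
# The constants below are standard SI values; treating 200 kV as the
# calculator's default voltage is an assumption based on the test comment.

import math

h, m0, e, c = 6.62607015e-34, 9.1093837015e-31, 1.602176634e-19, 2.99792458e8
voltage = 200e3  # volts
wavelength_m = h / math.sqrt(2 * m0 * e * voltage * (1 + e * voltage / (2 * m0 * c ** 2)))
assert abs(wavelength_m * 1e10 - 0.0251) < 1e-3  # ~0.0251 Angstrom, as asserted above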
mit
meteorcloudy/tensorflow
tensorflow/contrib/distributions/python/ops/mixture.py
13
21088
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Mixture distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops.distributions import categorical from tensorflow.python.ops.distributions import distribution from tensorflow.python.ops.distributions import util as distribution_util from tensorflow.python.util import deprecation class Mixture(distribution.Distribution): """Mixture distribution. The `Mixture` object implements batched mixture distributions. The mixture model is defined by a `Categorical` distribution (the mixture) and a python list of `Distribution` objects. Methods supported include `log_prob`, `prob`, `mean`, `sample`, and `entropy_lower_bound`. #### Examples ```python # Create a mixture of two Gaussians: tfd = tf.contrib.distributions mix = 0.3 bimix_gauss = tfd.Mixture( cat=tfd.Categorical(probs=[mix, 1.-mix]), components=[ tfd.Normal(loc=-1., scale=0.1), tfd.Normal(loc=+1., scale=0.5), ]) # Plot the PDF. import matplotlib.pyplot as plt x = tf.linspace(-2., 3., int(1e4)).eval() plt.plot(x, bimix_gauss.prob(x).eval()); ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, cat, components, validate_args=False, allow_nan_stats=True, use_static_graph=False, name="Mixture"): """Initialize a Mixture distribution. A `Mixture` is defined by a `Categorical` (`cat`, representing the mixture probabilities) and a list of `Distribution` objects all having matching dtype, batch shape, event shape, and continuity properties (the components). The `num_classes` of `cat` must be possible to infer at graph construction time and match `len(components)`. Args: cat: A `Categorical` distribution instance, representing the probabilities of `distributions`. components: A list or tuple of `Distribution` instances. Each instance must have the same type, be defined on the same domain, and have matching `event_shape` and `batch_shape`. validate_args: Python `bool`, default `False`. If `True`, raise a runtime error if batch or event ranks are inconsistent between cat and any of the distributions. 
This is only checked if the ranks cannot be determined statically at graph construction time. allow_nan_stats: Boolean, default `True`. If `False`, raise an exception if a statistic (e.g. mean/mode/etc...) is undefined for any batch member. If `True`, batch members with valid parameters leading to undefined statistics will return NaN for this statistic. use_static_graph: Calls to `sample` will not rely on dynamic tensor indexing, allowing for some static graph compilation optimizations, but at the expense of sampling all underlying distributions in the mixture. (Possibly useful when running on TPUs). Default value: `False` (i.e., use dynamic indexing). name: A name for this distribution (optional). Raises: TypeError: If cat is not a `Categorical`, or `components` is not a list or tuple, or the elements of `components` are not instances of `Distribution`, or do not have matching `dtype`. ValueError: If `components` is an empty list or tuple, or its elements do not have a statically known event rank. If `cat.num_classes` cannot be inferred at graph creation time, or the constant value of `cat.num_classes` is not equal to `len(components)`, or all `components` and `cat` do not have matching static batch shapes, or all components do not have matching static event shapes. """ parameters = dict(locals()) if not isinstance(cat, categorical.Categorical): raise TypeError("cat must be a Categorical distribution, but saw: %s" % cat) if not components: raise ValueError("components must be a non-empty list or tuple") if not isinstance(components, (list, tuple)): raise TypeError("components must be a list or tuple, but saw: %s" % components) if not all(isinstance(c, distribution.Distribution) for c in components): raise TypeError( "all entries in components must be Distribution instances" " but saw: %s" % components) dtype = components[0].dtype if not all(d.dtype == dtype for d in components): raise TypeError("All components must have the same dtype, but saw " "dtypes: %s" % [(d.name, d.dtype) for d in components]) static_event_shape = components[0].event_shape static_batch_shape = cat.batch_shape for d in components: static_event_shape = static_event_shape.merge_with(d.event_shape) static_batch_shape = static_batch_shape.merge_with(d.batch_shape) if static_event_shape.ndims is None: raise ValueError( "Expected to know rank(event_shape) from components, but " "none of the components provide a static number of ndims") # Ensure that all batch and event ndims are consistent. with ops.name_scope(name, values=[cat.logits]) as name: num_components = cat.event_size static_num_components = tensor_util.constant_value(num_components) if static_num_components is None: raise ValueError( "Could not infer number of classes from cat and unable " "to compare this value to the number of components passed in.") # Possibly convert from numpy 0-D array. static_num_components = int(static_num_components) if static_num_components != len(components): raise ValueError("cat.num_classes != len(components): %d vs. 
%d" % (static_num_components, len(components))) cat_batch_shape = cat.batch_shape_tensor() cat_batch_rank = array_ops.size(cat_batch_shape) if validate_args: batch_shapes = [d.batch_shape_tensor() for d in components] batch_ranks = [array_ops.size(bs) for bs in batch_shapes] check_message = ("components[%d] batch shape must match cat " "batch shape") self._assertions = [ check_ops.assert_equal( cat_batch_rank, batch_ranks[di], message=check_message % di) for di in range(len(components)) ] self._assertions += [ check_ops.assert_equal( cat_batch_shape, batch_shapes[di], message=check_message % di) for di in range(len(components)) ] else: self._assertions = [] self._cat = cat self._components = list(components) self._num_components = static_num_components self._static_event_shape = static_event_shape self._static_batch_shape = static_batch_shape self._use_static_graph = use_static_graph if use_static_graph and static_num_components is None: raise ValueError("Number of categories must be known statically when " "`static_sample=True`.") # We let the Mixture distribution access _graph_parents since its arguably # more like a baseclass. graph_parents = self._cat._graph_parents # pylint: disable=protected-access for c in self._components: graph_parents += c._graph_parents # pylint: disable=protected-access super(Mixture, self).__init__( dtype=dtype, reparameterization_type=distribution.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=graph_parents, name=name) @property def cat(self): return self._cat @property def components(self): return self._components @property def num_components(self): return self._num_components def _batch_shape_tensor(self): return self._cat.batch_shape_tensor() def _batch_shape(self): return self._static_batch_shape def _event_shape_tensor(self): return self._components[0].event_shape_tensor() def _event_shape(self): return self._static_event_shape def _expand_to_event_rank(self, x): """Expand the rank of x up to static_event_rank times for broadcasting. The static event rank was checked to not be None at construction time. Args: x: A tensor to expand. Returns: The expanded tensor. """ expanded_x = x for _ in range(self.event_shape.ndims): expanded_x = array_ops.expand_dims(expanded_x, -1) return expanded_x def _mean(self): with ops.control_dependencies(self._assertions): distribution_means = [d.mean() for d in self.components] cat_probs = self._cat_probs(log_probs=False) cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs] partial_means = [ c_p * m for (c_p, m) in zip(cat_probs, distribution_means) ] # These should all be the same shape by virtue of matching # batch_shape and event_shape. return math_ops.add_n(partial_means) def _stddev(self): with ops.control_dependencies(self._assertions): distribution_means = [d.mean() for d in self.components] distribution_devs = [d.stddev() for d in self.components] cat_probs = self._cat_probs(log_probs=False) stacked_means = array_ops.stack(distribution_means, axis=-1) stacked_devs = array_ops.stack(distribution_devs, axis=-1) cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs] broadcasted_cat_probs = (array_ops.stack(cat_probs, axis=-1) * array_ops.ones_like(stacked_means)) batched_dev = distribution_utils.mixture_stddev( array_ops.reshape(broadcasted_cat_probs, [-1, len(self.components)]), array_ops.reshape(stacked_means, [-1, len(self.components)]), array_ops.reshape(stacked_devs, [-1, len(self.components)])) # I.e. 
re-shape to list(batch_shape) + list(event_shape). return array_ops.reshape(batched_dev, array_ops.shape(broadcasted_cat_probs)[:-1]) def _log_prob(self, x): with ops.control_dependencies(self._assertions): x = ops.convert_to_tensor(x, name="x") distribution_log_probs = [d.log_prob(x) for d in self.components] cat_log_probs = self._cat_probs(log_probs=True) final_log_probs = [ cat_lp + d_lp for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs) ] concat_log_probs = array_ops.stack(final_log_probs, 0) log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0]) return log_sum_exp def _log_cdf(self, x): with ops.control_dependencies(self._assertions): x = ops.convert_to_tensor(x, name="x") distribution_log_cdfs = [d.log_cdf(x) for d in self.components] cat_log_probs = self._cat_probs(log_probs=True) final_log_cdfs = [ cat_lp + d_lcdf for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs) ] concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0) mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0]) return mixture_log_cdf def _sample_n(self, n, seed=None): if self._use_static_graph: # This sampling approach is almost the same as the approach used by # `MixtureSameFamily`. The differences are due to having a list of # `Distribution` objects rather than a single object, and maintaining # random seed management that is consistent with the non-static code path. samples = [] cat_samples = self.cat.sample(n, seed=seed) for c in range(self.num_components): seed = distribution_util.gen_new_seed(seed, "mixture") samples.append(self.components[c].sample(n, seed=seed)) x = array_ops.stack( samples, -self._static_event_shape.ndims - 1) # [n, B, k, E] npdt = x.dtype.as_numpy_dtype mask = array_ops.one_hot( indices=cat_samples, # [n, B] depth=self._num_components, # == k on_value=np.ones([], dtype=npdt), off_value=np.zeros([], dtype=npdt)) # [n, B, k] mask = distribution_utils.pad_mixture_dimensions( mask, self, self._cat, self._static_event_shape.ndims) # [n, B, k, [1]*e] return math_ops.reduce_sum( x * mask, axis=-1 - self._static_event_shape.ndims) # [n, B, E] with ops.control_dependencies(self._assertions): n = ops.convert_to_tensor(n, name="n") static_n = tensor_util.constant_value(n) n = int(static_n) if static_n is not None else n cat_samples = self.cat.sample(n, seed=seed) static_samples_shape = cat_samples.get_shape() if static_samples_shape.is_fully_defined(): samples_shape = static_samples_shape.as_list() samples_size = static_samples_shape.num_elements() else: samples_shape = array_ops.shape(cat_samples) samples_size = array_ops.size(cat_samples) static_batch_shape = self.batch_shape if static_batch_shape.is_fully_defined(): batch_shape = static_batch_shape.as_list() batch_size = static_batch_shape.num_elements() else: batch_shape = self.batch_shape_tensor() batch_size = math_ops.reduce_prod(batch_shape) static_event_shape = self.event_shape if static_event_shape.is_fully_defined(): event_shape = np.array(static_event_shape.as_list(), dtype=np.int32) else: event_shape = self.event_shape_tensor() # Get indices into the raw cat sampling tensor. We will # need these to stitch sample values back out after sampling # within the component partitions. samples_raw_indices = array_ops.reshape( math_ops.range(0, samples_size), samples_shape) # Partition the raw indices so that we can use # dynamic_stitch later to reconstruct the samples from the # known partitions. 
partitioned_samples_indices = data_flow_ops.dynamic_partition( data=samples_raw_indices, partitions=cat_samples, num_partitions=self.num_components) # Copy the batch indices n times, as we will need to know # these to pull out the appropriate rows within the # component partitions. batch_raw_indices = array_ops.reshape( array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape) # Explanation of the dynamic partitioning below: # batch indices are i.e., [0, 1, 0, 1, 0, 1] # Suppose partitions are: # [1 1 0 0 1 1] # After partitioning, batch indices are cut as: # [batch_indices[x] for x in 2, 3] # [batch_indices[x] for x in 0, 1, 4, 5] # i.e. # [1 1] and [0 0 0 0] # Now we sample n=2 from part 0 and n=4 from part 1. # For part 0 we want samples from batch entries 1, 1 (samples 0, 1), # and for part 1 we want samples from batch entries 0, 0, 0, 0 # (samples 0, 1, 2, 3). partitioned_batch_indices = data_flow_ops.dynamic_partition( data=batch_raw_indices, partitions=cat_samples, num_partitions=self.num_components) samples_class = [None for _ in range(self.num_components)] for c in range(self.num_components): n_class = array_ops.size(partitioned_samples_indices[c]) seed = distribution_util.gen_new_seed(seed, "mixture") samples_class_c = self.components[c].sample(n_class, seed=seed) # Pull out the correct batch entries from each index. # To do this, we may have to flatten the batch shape. # For sample s, batch element b of component c, we get the # partitioned batch indices from # partitioned_batch_indices[c]; and shift each element by # the sample index. The final lookup can be thought of as # a matrix gather along locations (s, b) in # samples_class_c where the n_class rows correspond to # samples within this component and the batch_size columns # correspond to batch elements within the component. # # Thus the lookup index is # lookup[c, i] = batch_size * s[i] + b[c, i] # for i = 0 ... n_class[c] - 1. lookup_partitioned_batch_indices = ( batch_size * math_ops.range(n_class) + partitioned_batch_indices[c]) samples_class_c = array_ops.reshape( samples_class_c, array_ops.concat([[n_class * batch_size], event_shape], 0)) samples_class_c = array_ops.gather( samples_class_c, lookup_partitioned_batch_indices, name="samples_class_c_gather") samples_class[c] = samples_class_c # Stitch back together the samples across the components. lhs_flat_ret = data_flow_ops.dynamic_stitch( indices=partitioned_samples_indices, data=samples_class) # Reshape back to proper sample, batch, and event shape. ret = array_ops.reshape(lhs_flat_ret, array_ops.concat([samples_shape, self.event_shape_tensor()], 0)) ret.set_shape( tensor_shape.TensorShape(static_samples_shape).concatenate( self.event_shape)) return ret def entropy_lower_bound(self, name="entropy_lower_bound"): r"""A lower bound on the entropy of this mixture model. The bound below is not always very tight, and its usefulness depends on the mixture probabilities and the components in use. A lower bound is useful for ELBO when the `Mixture` is the variational distribution: \\( \log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q] \\) where \\( p \\) is the prior distribution, \\( q \\) is the variational, and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound \\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in place of \\( H[q] \\). 
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with \\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a simple lower bound is: \\( \begin{align} H[q] & = - \int q(z) \log q(z) dz \\\ & = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\ & \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\ & = \sum_i c_i H[q_i] \end{align} \\) This is the term we calculate below for \\( G[q] \\). Args: name: A name for this operation (optional). Returns: A lower bound on the Mixture's entropy. """ with self._name_scope(name, values=[self.cat.logits]): with ops.control_dependencies(self._assertions): distribution_entropies = [d.entropy() for d in self.components] cat_probs = self._cat_probs(log_probs=False) partial_entropies = [ c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies) ] # These are all the same shape by virtue of matching batch_shape return math_ops.add_n(partial_entropies) def _cat_probs(self, log_probs): """Get a list of num_components batchwise probabilities.""" which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax cat_probs = which_softmax(self.cat.logits) cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1) return cat_probs
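# A plain NumPy/SciPy sketch (not part of the original module) of the reduction
# _log_prob performs above: log p(x) = logsumexp_k(log pi_k + log p_k(x)).
# The weights and component parameters mirror the two-Gaussian example in the
# class docstring; everything else here is illustrative only.

import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm

weights = np.array([0.3, 0.7])
comps = [norm(loc=-1.0, scale=0.1), norm(loc=1.0, scale=0.5)]
x = 0.25
log_terms = [np.log(w) + d.logpdf(x) for w, d in zip(weights, comps)]
log_mix = logsumexp(log_terms)
# agrees with the direct (non-log-space) mixture density
assert np.isclose(np.exp(log_mix), sum(w * d.pdf(x) for w, d in zip(weights, comps)))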
apache-2.0
kazemakase/scikit-learn
sklearn/cluster/tests/test_birch.py
342
5603
""" Tests for the birch clustering algorithm. """ from scipy import sparse import numpy as np from sklearn.cluster.tests.common import generate_clustered_data from sklearn.cluster.birch import Birch from sklearn.cluster.hierarchical import AgglomerativeClustering from sklearn.datasets import make_blobs from sklearn.linear_model import ElasticNet from sklearn.metrics import pairwise_distances_argmin, v_measure_score from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns def test_n_samples_leaves_roots(): # Sanity check for the number of samples in leaves and roots X, y = make_blobs(n_samples=10) brc = Birch() brc.fit(X) n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_]) n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves() for sc in leaf.subclusters_]) assert_equal(n_samples_leaves, X.shape[0]) assert_equal(n_samples_root, X.shape[0]) def test_partial_fit(): # Test that fit is equivalent to calling partial_fit multiple times X, y = make_blobs(n_samples=100) brc = Birch(n_clusters=3) brc.fit(X) brc_partial = Birch(n_clusters=None) brc_partial.partial_fit(X[:50]) brc_partial.partial_fit(X[50:]) assert_array_equal(brc_partial.subcluster_centers_, brc.subcluster_centers_) # Test that same global labels are obtained after calling partial_fit # with None brc_partial.set_params(n_clusters=3) brc_partial.partial_fit(None) assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_) def test_birch_predict(): # Test the predict method predicts the nearest centroid. rng = np.random.RandomState(0) X = generate_clustered_data(n_clusters=3, n_features=3, n_samples_per_cluster=10) # n_samples * n_samples_per_cluster shuffle_indices = np.arange(30) rng.shuffle(shuffle_indices) X_shuffle = X[shuffle_indices, :] brc = Birch(n_clusters=4, threshold=1.) brc.fit(X_shuffle) centroids = brc.subcluster_centers_ assert_array_equal(brc.labels_, brc.predict(X_shuffle)) nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids) assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0) def test_n_clusters(): # Test that n_clusters param works properly X, y = make_blobs(n_samples=100, centers=10) brc1 = Birch(n_clusters=10) brc1.fit(X) assert_greater(len(brc1.subcluster_centers_), 10) assert_equal(len(np.unique(brc1.labels_)), 10) # Test that n_clusters = Agglomerative Clustering gives # the same results. gc = AgglomerativeClustering(n_clusters=10) brc2 = Birch(n_clusters=gc) brc2.fit(X) assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_) assert_array_equal(brc1.labels_, brc2.labels_) # Test that the wrong global clustering step raises an Error. clf = ElasticNet() brc3 = Birch(n_clusters=clf) assert_raises(ValueError, brc3.fit, X) # Test that a small number of clusters raises a warning. brc4 = Birch(threshold=10000.) 
assert_warns(UserWarning, brc4.fit, X) def test_sparse_X(): # Test that sparse and dense data give same results X, y = make_blobs(n_samples=100, centers=10) brc = Birch(n_clusters=10) brc.fit(X) csr = sparse.csr_matrix(X) brc_sparse = Birch(n_clusters=10) brc_sparse.fit(csr) assert_array_equal(brc.labels_, brc_sparse.labels_) assert_array_equal(brc.subcluster_centers_, brc_sparse.subcluster_centers_) def check_branching_factor(node, branching_factor): subclusters = node.subclusters_ assert_greater_equal(branching_factor, len(subclusters)) for cluster in subclusters: if cluster.child_: check_branching_factor(cluster.child_, branching_factor) def test_branching_factor(): # Test that nodes have at max branching_factor number of subclusters X, y = make_blobs() branching_factor = 9 # Purposefully set a low threshold to maximize the subclusters. brc = Birch(n_clusters=None, branching_factor=branching_factor, threshold=0.01) brc.fit(X) check_branching_factor(brc.root_, branching_factor) brc = Birch(n_clusters=3, branching_factor=branching_factor, threshold=0.01) brc.fit(X) check_branching_factor(brc.root_, branching_factor) # Raises error when branching_factor is set to one. brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01) assert_raises(ValueError, brc.fit, X) def check_threshold(birch_instance, threshold): """Use the leaf linked list for traversal""" current_leaf = birch_instance.dummy_leaf_.next_leaf_ while current_leaf: subclusters = current_leaf.subclusters_ for sc in subclusters: assert_greater_equal(threshold, sc.radius) current_leaf = current_leaf.next_leaf_ def test_threshold(): # Test that the leaf subclusters have a threshold lesser than radius X, y = make_blobs(n_samples=80, centers=4) brc = Birch(threshold=0.5, n_clusters=None) brc.fit(X) check_threshold(brc, 0.5) brc = Birch(threshold=5.0, n_clusters=None) brc.fit(X) check_threshold(brc, 5.)
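# A minimal end-to-end sketch (not part of the original test file) of the
# property test_birch_predict exercises above: after fitting, predict() on the
# training data reproduces labels_, since both go through the nearest
# subcluster centroid.  The helper name and blob parameters are arbitrary, and
# the function is deliberately not wired into the test runner.

def _birch_smoke_check():
    X, _ = make_blobs(n_samples=60, centers=3, random_state=0)
    model = Birch(n_clusters=3, threshold=0.5).fit(X)
    assert_array_equal(model.labels_, model.predict(X))
    return model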
bsd-3-clause
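The record above dumps scikit-learn's Birch test module: it checks that two partial_fit calls reproduce a single fit, that the global clustering step can be run afterwards with partial_fit(None), and that branching_factor and threshold bound the CF-tree. A minimal, self-contained sketch of that same behaviour is given below; it assumes a current scikit-learn release, where Birch is imported from the public sklearn.cluster namespace rather than the old private sklearn.cluster.birch path used in the dump, and the blob data and parameter values are illustrative only.

# Sketch of the Birch behaviour exercised by the dumped tests (assumes a
# current scikit-learn; data and parameters are illustrative, not taken
# from the test fixtures).
import numpy as np
from sklearn.cluster import Birch
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=200, centers=3, random_state=0)

# One-shot fit with a global clustering step into 3 clusters.
brc_full = Birch(n_clusters=3).fit(X)

# Two partial_fit calls over the same data, in the same order, should build
# the same CF-tree leaves -- this mirrors test_partial_fit above.
brc_part = Birch(n_clusters=None)
brc_part.partial_fit(X[:100])
brc_part.partial_fit(X[100:])
print(np.allclose(brc_full.subcluster_centers_, brc_part.subcluster_centers_))

# Run only the global clustering step afterwards, as the test does with
# set_params(n_clusters=3) followed by partial_fit(None).
brc_part.set_params(n_clusters=3)
brc_part.partial_fit(None)
print(np.array_equal(brc_full.subcluster_labels_, brc_part.subcluster_labels_))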
mdhaber/scipy
scipy/stats/morestats.py
6
128770
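The content that follows dumps scipy/stats/morestats.py. Its boxcox docstring states the transform as ``y = (x**lmbda - 1) / lmbda`` for ``lmbda != 0`` and ``log(x)`` for ``lmbda == 0``, with the maximum-likelihood lambda returned when none is given. A minimal sketch checking that statement against the public scipy.stats.boxcox API is given here; it assumes scipy is installed, and the gamma-distributed sample and the lambda values are illustrative, not taken from the dump.

# Sketch of the Box-Cox formula documented in the dumped boxcox docstring
# (assumes scipy is installed; sample data and lambda values are illustrative).
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = rng.gamma(2.0, 3.0, size=500)  # strictly positive sample

for lmbda in (0.0, 0.5, 2.0):
    y = stats.boxcox(x, lmbda=lmbda)  # transform at a fixed lambda
    manual = np.log(x) if lmbda == 0 else (x**lmbda - 1) / lmbda
    print(lmbda, np.allclose(y, manual))  # expected True for each lambda

# With lmbda=None the optimal lambda is estimated as well (cf. boxcox_normmax).
xt, lmax = stats.boxcox(x)
print("lambda maximizing the log-likelihood:", lmax)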
from __future__ import annotations import math import warnings from collections import namedtuple import numpy as np from numpy import (isscalar, r_, log, around, unique, asarray, zeros, arange, sort, amin, amax, atleast_1d, sqrt, array, compress, pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot) from scipy import optimize from scipy import special from . import statlib from . import stats from .stats import find_repeats, _contains_nan, _normtest_finish from .contingency import chi2_contingency from . import distributions from ._distn_infrastructure import rv_generic from ._hypotests import _get_wilcoxon_distr __all__ = ['mvsdist', 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test', 'fligner', 'mood', 'wilcoxon', 'median_test', 'circmean', 'circvar', 'circstd', 'anderson_ksamp', 'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax', 'yeojohnson_normplot' ] Mean = namedtuple('Mean', ('statistic', 'minmax')) Variance = namedtuple('Variance', ('statistic', 'minmax')) Std_dev = namedtuple('Std_dev', ('statistic', 'minmax')) def bayes_mvs(data, alpha=0.90): r""" Bayesian confidence intervals for the mean, var, and std. Parameters ---------- data : array_like Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. Requires 2 or more data points. alpha : float, optional Probability that the returned confidence interval contains the true parameter. Returns ------- mean_cntr, var_cntr, std_cntr : tuple The three results are for the mean, variance and standard deviation, respectively. Each result is a tuple of the form:: (center, (lower, upper)) with `center` the mean of the conditional pdf of the value given the data, and `(lower, upper)` a confidence interval, centered on the median, containing the estimate to a probability ``alpha``. See Also -------- mvsdist Notes ----- Each tuple of mean, variance, and standard deviation estimates represent the (center, (lower, upper)) with center the mean of the conditional pdf of the value given the data and (lower, upper) is a confidence interval centered on the median, containing the estimate to a probability ``alpha``. Converts data to 1-D and assumes all data has the same mean and variance. Uses Jeffrey's prior for variance and std. Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278, 2006. 
Examples -------- First a basic example to demonstrate the outputs: >>> from scipy import stats >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = stats.bayes_mvs(data) >>> mean Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467)) >>> var Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...)) >>> std Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631)) Now we generate some normally distributed random data, and get estimates of mean and standard deviation with 95% confidence intervals for those estimates: >>> n_samples = 100000 >>> data = stats.norm.rvs(size=n_samples) >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95) >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.hist(data, bins=100, density=True, label='Histogram of data') >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean') >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r', ... alpha=0.2, label=r'Estimated mean (95% limits)') >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale') >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2, ... label=r'Estimated scale (95% limits)') >>> ax.legend(fontsize=10) >>> ax.set_xlim([-4, 4]) >>> ax.set_ylim([0, 0.5]) >>> plt.show() """ m, v, s = mvsdist(data) if alpha >= 1 or alpha <= 0: raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha) m_res = Mean(m.mean(), m.interval(alpha)) v_res = Variance(v.mean(), v.interval(alpha)) s_res = Std_dev(s.mean(), s.interval(alpha)) return m_res, v_res, s_res def mvsdist(data): """ 'Frozen' distributions for mean, variance, and standard deviation of data. Parameters ---------- data : array_like Input array. Converted to 1-D using ravel. Requires 2 or more data-points. Returns ------- mdist : "frozen" distribution object Distribution object representing the mean of the data. vdist : "frozen" distribution object Distribution object representing the variance of the data. sdist : "frozen" distribution object Distribution object representing the standard deviation of the data. See Also -------- bayes_mvs Notes ----- The return values from ``bayes_mvs(data)`` is equivalent to ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)`` on the three distribution objects returned from this function will give the same results that are returned from `bayes_mvs`. References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", https://scholarsarchive.byu.edu/facpub/278, 2006. Examples -------- >>> from scipy import stats >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = stats.mvsdist(data) We now have frozen distribution objects "mean", "var" and "std" that we can examine: >>> mean.mean() 9.0 >>> mean.interval(0.95) (6.6120585482655692, 11.387941451734431) >>> mean.std() 1.1952286093343936 """ x = ravel(data) n = len(x) if n < 2: raise ValueError("Need at least 2 data-points.") xbar = x.mean() C = x.var() if n > 1000: # gaussian approximations for large n mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n)) sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n))) vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C) else: nm1 = n - 1 fac = n * C / 2. val = nm1 / 2. 
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1)) sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac)) vdist = distributions.invgamma(val, scale=fac) return mdist, vdist, sdist def kstat(data, n=2): r""" Return the nth k-statistic (1<=n<=4 so far). The nth k-statistic k_n is the unique symmetric unbiased estimator of the nth cumulant kappa_n. Parameters ---------- data : array_like Input array. Note that n-D input gets flattened. n : int, {1, 2, 3, 4}, optional Default is equal to 2. Returns ------- kstat : float The nth k-statistic. See Also -------- kstatvar: Returns an unbiased estimator of the variance of the k-statistic. moment: Returns the n-th central moment about the mean for a sample. Notes ----- For a sample size n, the first few k-statistics are given by: .. math:: k_{1} = \mu k_{2} = \frac{n}{n-1} m_{2} k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3} k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)} where :math:`\mu` is the sample mean, :math:`m_2` is the sample variance, and :math:`m_i` is the i-th sample central moment. References ---------- http://mathworld.wolfram.com/k-Statistic.html http://mathworld.wolfram.com/Cumulant.html Examples -------- >>> from scipy import stats >>> from numpy.random import default_rng >>> rng = default_rng() As sample size increases, n-th moment and n-th k-statistic converge to the same number (although they aren't identical). In the case of the normal distribution, they converge to zero. >>> for n in [2, 3, 4, 5, 6, 7]: ... x = rng.normal(size=10**n) ... m, k = stats.moment(x, 3), stats.kstat(x, 3) ... print("%.3g %.3g %.3g" % (m, k, m-k)) -0.631 -0.651 0.0194 # random 0.0282 0.0283 -8.49e-05 -0.0454 -0.0454 1.36e-05 7.53e-05 7.53e-05 -2.26e-09 0.00166 0.00166 -4.99e-09 -2.88e-06 -2.88e-06 8.63e-13 """ if n > 4 or n < 1: raise ValueError("k-statistics only supported for 1<=n<=4") n = int(n) S = np.zeros(n + 1, np.float64) data = ravel(data) N = data.size # raise ValueError on empty input if N == 0: raise ValueError("Data input must not be empty") # on nan input, return nan without warning if np.isnan(np.sum(data)): return np.nan for k in range(1, n + 1): S[k] = np.sum(data**k, axis=0) if n == 1: return S[1] * 1.0/N elif n == 2: return (N*S[2] - S[1]**2.0) / (N*(N - 1.0)) elif n == 3: return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0)) elif n == 4: return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / (N*(N-1.0)*(N-2.0)*(N-3.0))) else: raise ValueError("Should not be here.") def kstatvar(data, n=2): r"""Return an unbiased estimator of the variance of the k-statistic. See `kstat` for more details of the k-statistic. Parameters ---------- data : array_like Input array. Note that n-D input gets flattened. n : int, {1, 2}, optional Default is equal to 2. Returns ------- kstatvar : float The nth k-statistic variance. See Also -------- kstat: Returns the n-th k-statistic. moment: Returns the n-th central moment about the mean for a sample. Notes ----- The variances of the first few k-statistics are given by: .. 
math:: var(k_{1}) = \frac{\kappa^2}{n} var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1} var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} + \frac{9 \kappa^2_{3}}{n - 1} + \frac{6 n \kappa^3_{2}}{(n-1) (n-2)} var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} + \frac{48 \kappa_{3} \kappa_5}{n - 1} + \frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} + \frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} + \frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)} """ data = ravel(data) N = len(data) if n == 1: return kstat(data, n=2) * 1.0/N elif n == 2: k2 = kstat(data, n=2) k4 = kstat(data, n=4) return (2*N*k2**2 + (N-1)*k4) / (N*(N+1)) else: raise ValueError("Only n=1 or n=2 supported.") def _calc_uniform_order_statistic_medians(n): """Approximations of uniform order statistic medians. Parameters ---------- n : int Sample size. Returns ------- v : 1d float array Approximations of the order statistic medians. References ---------- .. [1] James J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. Examples -------- Order statistics of the uniform distribution on the unit interval are marginally distributed according to beta distributions. The expectations of these order statistic are evenly spaced across the interval, but the distributions are skewed in a way that pushes the medians slightly towards the endpoints of the unit interval: >>> n = 4 >>> k = np.arange(1, n+1) >>> from scipy.stats import beta >>> a = k >>> b = n-k+1 >>> beta.mean(a, b) array([0.2, 0.4, 0.6, 0.8]) >>> beta.median(a, b) array([0.15910358, 0.38572757, 0.61427243, 0.84089642]) The Filliben approximation uses the exact medians of the smallest and greatest order statistics, and the remaining medians are approximated by points spread evenly across a sub-interval of the unit interval: >>> from scipy.morestats import _calc_uniform_order_statistic_medians >>> _calc_uniform_order_statistic_medians(n) array([0.15910358, 0.38545246, 0.61454754, 0.84089642]) This plot shows the skewed distributions of the order statistics of a sample of size four from a uniform distribution on the unit interval: >>> import matplotlib.pyplot as plt >>> x = np.linspace(0.0, 1.0, num=50, endpoint=True) >>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)] >>> plt.figure() >>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3]) """ v = np.empty(n, dtype=np.float64) v[-1] = 0.5**(1.0 / n) v[0] = 1 - v[-1] i = np.arange(2, n) v[1:-1] = (i - 0.3175) / (n + 0.365) return v def _parse_dist_kw(dist, enforce_subclass=True): """Parse `dist` keyword. Parameters ---------- dist : str or stats.distributions instance. Several functions take `dist` as a keyword, hence this utility function. enforce_subclass : bool, optional If True (default), `dist` needs to be a `_distn_infrastructure.rv_generic` instance. It can sometimes be useful to set this keyword to False, if a function wants to accept objects that just look somewhat like such an instance (for example, they have a ``ppf`` method). 
""" if isinstance(dist, rv_generic): pass elif isinstance(dist, str): try: dist = getattr(distributions, dist) except AttributeError as e: raise ValueError("%s is not a valid distribution name" % dist) from e elif enforce_subclass: msg = ("`dist` should be a stats.distributions instance or a string " "with the name of such a distribution.") raise ValueError(msg) return dist def _add_axis_labels_title(plot, xlabel, ylabel, title): """Helper function to add axes labels and a title to stats plots.""" try: if hasattr(plot, 'set_title'): # Matplotlib Axes instance or something that looks like it plot.set_title(title) plot.set_xlabel(xlabel) plot.set_ylabel(ylabel) else: # matplotlib.pyplot module plot.title(title) plot.xlabel(xlabel) plot.ylabel(ylabel) except Exception: # Not an MPL object or something that looks (enough) like it. # Don't crash on adding labels or title pass def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False): """ Calculate quantiles for a probability plot, and optionally show the plot. Generates a probability plot of sample data against the quantiles of a specified theoretical distribution (the normal distribution by default). `probplot` optionally calculates a best-fit line for the data and plots the results using Matplotlib or a given plot function. Parameters ---------- x : array_like Sample/response data from which `probplot` creates the plot. sparams : tuple, optional Distribution-specific shape parameters (shape parameters plus location and scale). dist : str or stats.distributions instance, optional Distribution or distribution function name. The default is 'norm' for a normal probability plot. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. fit : bool, optional Fit a least-squares regression (best-fit) line to the sample data if True (default). plot : object, optional If given, plots the quantiles. If given and `fit` is True, also plots the least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. Returns ------- (osm, osr) : tuple of ndarrays Tuple of theoretical quantiles (osm, or order statistic medians) and ordered responses (osr). `osr` is simply sorted input `x`. For details on how `osm` is calculated see the Notes section. (slope, intercept, r) : tuple of floats, optional Tuple containing the result of the least-squares fit, if that is performed by `probplot`. `r` is the square root of the coefficient of determination. If ``fit=False`` and ``plot=None``, this tuple is not returned. Notes ----- Even if `plot` is given, the figure is not shown or saved by `probplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. `probplot` generates a probability plot, which should not be confused with a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this type, see ``statsmodels.api.ProbPlot``. The formula used for the theoretical quantiles (horizontal axis of the probability plot) is Filliben's estimate:: quantiles = dist.ppf(val), for 0.5**(1/n), for i = n val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1 1 - 0.5**(1/n), for i = 1 where ``i`` indicates the i-th ordered value and ``n`` is the total number of values. 
Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> nsample = 100 >>> rng = np.random.default_rng() A t distribution with small degrees of freedom: >>> ax1 = plt.subplot(221) >>> x = stats.t.rvs(3, size=nsample, random_state=rng) >>> res = stats.probplot(x, plot=plt) A t distribution with larger degrees of freedom: >>> ax2 = plt.subplot(222) >>> x = stats.t.rvs(25, size=nsample, random_state=rng) >>> res = stats.probplot(x, plot=plt) A mixture of two normal distributions with broadcasting: >>> ax3 = plt.subplot(223) >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], ... size=(nsample//2,2), random_state=rng).ravel() >>> res = stats.probplot(x, plot=plt) A standard normal distribution: >>> ax4 = plt.subplot(224) >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample, random_state=rng) >>> res = stats.probplot(x, plot=plt) Produce a new figure with a loggamma distribution, using the ``dist`` and ``sparams`` keywords: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> x = stats.loggamma.rvs(c=2.5, size=500, random_state=rng) >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax) >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5") Show the results with Matplotlib: >>> plt.show() """ x = np.asarray(x) if x.size == 0: if fit: return (x, x), (np.nan, np.nan, 0.0) else: return x, x osm_uniform = _calc_uniform_order_statistic_medians(len(x)) dist = _parse_dist_kw(dist, enforce_subclass=False) if sparams is None: sparams = () if isscalar(sparams): sparams = (sparams,) if not isinstance(sparams, tuple): sparams = tuple(sparams) osm = dist.ppf(osm_uniform, *sparams) osr = sort(x) if fit: # perform a linear least squares fit. slope, intercept, r, prob, _ = stats.linregress(osm, osr) if plot is not None: plot.plot(osm, osr, 'bo') if fit: plot.plot(osm, slope*osm + intercept, 'r-') _add_axis_labels_title(plot, xlabel='Theoretical quantiles', ylabel='Ordered Values', title='Probability Plot') # Add R^2 value to the plot as text if rvalue: xmin = amin(osm) xmax = amax(osm) ymin = amin(x) ymax = amax(x) posx = xmin + 0.70 * (xmax - xmin) posy = ymin + 0.01 * (ymax - ymin) plot.text(posx, posy, "$R^2=%1.4f$" % r**2) if fit: return (osm, osr), (slope, intercept, r) else: return osm, osr def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'): """Calculate the shape parameter that maximizes the PPCC. The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. ``ppcc_max`` returns the shape parameter that would maximize the probability plot correlation coefficient for the given data to a one-parameter family of distributions. Parameters ---------- x : array_like Input array. brack : tuple, optional Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c) then they are assumed to be a starting interval for a downhill bracket search (see `scipy.optimize.brent`). dist : str or stats.distributions instance, optional Distribution or distribution function name. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. Returns ------- shape_value : float The shape parameter at which the probability plot correlation coefficient reaches its max value. See Also -------- ppcc_plot, probplot, boxcox Notes ----- The brack keyword serves as a starting point which is useful in corner cases. 
One can use a plot to obtain a rough visual estimate of the location for the maximum to start the search near it. References ---------- .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. .. [2] Engineering Statistics Handbook, NIST/SEMATEC, https://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm Examples -------- First we generate some random data from a Weibull distribution with shape parameter 2.5: >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() >>> c = 2.5 >>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng) Generate the PPCC plot for this data with the Weibull distribution. >>> fig, ax = plt.subplots(figsize=(8, 6)) >>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax) We calculate the value where the shape should reach its maximum and a red line is drawn there. The line should coincide with the highest point in the PPCC graph. >>> cmax = stats.ppcc_max(x, brack=(c/2, 2*c), dist='weibull_min') >>> ax.axvline(cmax, color='r') >>> plt.show() """ dist = _parse_dist_kw(dist) osm_uniform = _calc_uniform_order_statistic_medians(len(x)) osr = sort(x) # this function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) # and returns 1-r so that a minimization function maximizes the # correlation def tempfunc(shape, mi, yvals, func): xvals = func(mi, shape) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf)) def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80): """Calculate and optionally plot probability plot correlation coefficient. The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. It cannot be used for distributions without shape parameters (like the normal distribution) or with multiple shape parameters. By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed distributions via an approximately normal one, and is therefore particularly useful in practice. Parameters ---------- x : array_like Input array. a, b : scalar Lower and upper bounds of the shape parameter to use. dist : str or stats.distributions instance, optional Distribution or distribution function name. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. plot : object, optional If given, plots PPCC against the shape parameter. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `a` to `b`). Returns ------- svals : ndarray The shape values for which `ppcc` was calculated. ppcc : ndarray The calculated probability plot correlation coefficient values. See Also -------- ppcc_max, probplot, boxcox_normplot, tukeylambda References ---------- J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. 
Examples -------- First we generate some random data from a Weibull distribution with shape parameter 2.5, and plot the histogram of the data: >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> rng = np.random.default_rng() >>> c = 2.5 >>> x = stats.weibull_min.rvs(c, scale=4, size=2000, random_state=rng) Take a look at the histogram of the data. >>> fig1, ax = plt.subplots(figsize=(9, 4)) >>> ax.hist(x, bins=50) >>> ax.set_title('Histogram of x') >>> plt.show() Now we explore this data with a PPCC plot as well as the related probability plot and Box-Cox normplot. A red line is drawn where we expect the PPCC value to be maximal (at the shape parameter ``c`` used above): >>> fig2 = plt.figure(figsize=(12, 4)) >>> ax1 = fig2.add_subplot(1, 3, 1) >>> ax2 = fig2.add_subplot(1, 3, 2) >>> ax3 = fig2.add_subplot(1, 3, 3) >>> res = stats.probplot(x, plot=ax1) >>> res = stats.boxcox_normplot(x, -4, 4, plot=ax2) >>> res = stats.ppcc_plot(x, c/2, 2*c, dist='weibull_min', plot=ax3) >>> ax3.axvline(c, color='r') >>> plt.show() """ if b <= a: raise ValueError("`b` has to be larger than `a`.") svals = np.linspace(a, b, num=N) ppcc = np.empty_like(svals) for k, sval in enumerate(svals): _, r2 = probplot(x, sval, dist=dist, fit=True) ppcc[k] = r2[-1] if plot is not None: plot.plot(svals, ppcc, 'x') _add_axis_labels_title(plot, xlabel='Shape Values', ylabel='Prob Plot Corr. Coef.', title='(%s) PPCC Plot' % dist) return svals, ppcc def boxcox_llf(lmb, data): r"""The boxcox log-likelihood function. Parameters ---------- lmb : scalar Parameter for Box-Cox transformation. See `boxcox` for details. data : array_like Data to calculate Box-Cox log-likelihood for. If `data` is multi-dimensional, the log-likelihood is calculated along the first axis. Returns ------- llf : float or ndarray Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`, an array otherwise. See Also -------- boxcox, probplot, boxcox_normplot, boxcox_normmax Notes ----- The Box-Cox log-likelihood function is defined here as .. math:: llf = (\lambda - 1) \sum_i(\log(x_i)) - N/2 \log(\sum_i (y_i - \bar{y})^2 / N), where ``y`` is the Box-Cox transformed input data ``x``. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes Generate some random variates and calculate Box-Cox log-likelihood values for them for a range of ``lmbda`` values: >>> rng = np.random.default_rng() >>> x = stats.loggamma.rvs(5, loc=10, size=1000, random_state=rng) >>> lmbdas = np.linspace(-2, 10) >>> llf = np.zeros(lmbdas.shape, dtype=float) >>> for ii, lmbda in enumerate(lmbdas): ... llf[ii] = stats.boxcox_llf(lmbda, x) Also find the optimal lmbda value with `boxcox`: >>> x_most_normal, lmbda_optimal = stats.boxcox(x) Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a horizontal line to check that that's really the optimum: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(lmbdas, llf, 'b.-') >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r') >>> ax.set_xlabel('lmbda parameter') >>> ax.set_ylabel('Box-Cox log-likelihood') Now add some probability plots to show that where the log-likelihood is maximized the data transformed with `boxcox` looks closest to normal: >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): ... xt = stats.boxcox(x, lmbda=lmbda) ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) ... 
ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') ... ax_inset.set_xticklabels([]) ... ax_inset.set_yticklabels([]) ... ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda) >>> plt.show() """ data = np.asarray(data) N = data.shape[0] if N == 0: return np.nan logdata = np.log(data) # Compute the variance of the transformed data. if lmb == 0: variance = np.var(logdata, axis=0) else: # Transform without the constant offset 1/lmb. The offset does # not effect the variance, and the subtraction of the offset can # lead to loss of precision. variance = np.var(data**lmb / lmb, axis=0) return (lmb - 1) * np.sum(logdata, axis=0) - N/2 * np.log(variance) def _boxcox_conf_interval(x, lmax, alpha): # Need to find the lambda for which # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1) target = boxcox_llf(lmax, x) - fac def rootfunc(lmbda, data, target): return boxcox_llf(lmbda, data) - target # Find positive endpoint of interval in which answer is to be found newlm = lmax + 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm += 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target)) # Now find negative interval in the same way newlm = lmax - 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm -= 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target)) return lmminus, lmplus def boxcox(x, lmbda=None, alpha=None, optimizer=None): r"""Return a dataset transformed by a Box-Cox power transformation. Parameters ---------- x : ndarray Input array. Must be positive 1-dimensional. Must not be constant. lmbda : {None, scalar}, optional If `lmbda` is not None, do the transformation for that value. If `lmbda` is None, find the lambda that maximizes the log-likelihood function and return it as the second output argument. alpha : {None, float}, optional If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence interval for `lmbda` as the third output argument. Must be between 0.0 and 1.0. optimizer : callable, optional If `lmbda` is None, `optimizer` is the scalar optimizer used to find the value of `lmbda` that minimizes the negative log-likelihood function. `optimizer` is a callable that accepts one argument: fun : callable The objective function, which evaluates the negative log-likelihood function at a provided value of `lmbda` and returns an object, such as an instance of `scipy.optimize.OptimizeResult`, which holds the optimal value of `lmbda` in an attribute `x`. See the example in `boxcox_normmax` or the documentation of `scipy.optimize.minimize_scalar` for more information. If `lmbda` is not None, `optimizer` is ignored. Returns ------- boxcox : ndarray Box-Cox power transformed array. maxlog : float, optional If the `lmbda` parameter is None, the second returned argument is the lambda that maximizes the log-likelihood function. (min_ci, max_ci) : tuple of float, optional If `lmbda` parameter is None and ``alpha`` is not None, this returned tuple of floats represents the minimum and maximum confidence limits given ``alpha``. See Also -------- probplot, boxcox_normplot, boxcox_normmax, boxcox_llf Notes ----- The Box-Cox transform is given by:: y = (x**lmbda - 1) / lmbda, for lmbda != 0 log(x), for lmbda = 0 `boxcox` requires the input data to be positive. 
Sometimes a Box-Cox transformation provides a shift parameter to achieve this; `boxcox` does not. Such a shift parameter is equivalent to adding a positive constant to `x` before calling `boxcox`. The confidence limits returned when ``alpha`` is provided give the interval where: .. math:: llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1), with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared function. References ---------- G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the Royal Statistical Society B, 26, 211-252 (1964). Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt We generate some random variates from a non-normal distribution and make a probability plot for it, to show it is non-normal in the tails: >>> fig = plt.figure() >>> ax1 = fig.add_subplot(211) >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) >>> ax1.set_xlabel('') >>> ax1.set_title('Probplot against normal distribution') We now use `boxcox` to transform the data so it's closest to normal: >>> ax2 = fig.add_subplot(212) >>> xt, _ = stats.boxcox(x) >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) >>> ax2.set_title('Probplot after Box-Cox transformation') >>> plt.show() """ x = np.asarray(x) if x.ndim != 1: raise ValueError("Data must be 1-dimensional.") if x.size == 0: return x if np.all(x == x[0]): raise ValueError("Data must not be constant.") if np.any(x <= 0): raise ValueError("Data must be positive.") if lmbda is not None: # single transformation return special.boxcox(x, lmbda) # If lmbda=None, find the lmbda that maximizes the log-likelihood function. lmax = boxcox_normmax(x, method='mle', optimizer=optimizer) y = boxcox(x, lmax) if alpha is None: return y, lmax else: # Find confidence interval interval = _boxcox_conf_interval(x, lmax, alpha) return y, lmax, interval def boxcox_normmax(x, brack=None, method='pearsonr', optimizer=None): """Compute optimal Box-Cox transform parameter for input data. Parameters ---------- x : array_like Input array. brack : 2-tuple, optional, default (-2.0, 2.0) The starting interval for a downhill bracket search for the default `optimize.brent` solver. Note that this is in most cases not critical; the final result is allowed to be outside this bracket. If `optimizer` is passed, `brack` must be None. method : str, optional The method to determine the optimal transform parameter (`boxcox` ``lmbda`` parameter). Options are: 'pearsonr' (default) Maximizes the Pearson correlation coefficient between ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be normally-distributed. 'mle' Minimizes the log-likelihood `boxcox_llf`. This is the method used in `boxcox`. 'all' Use all optimization methods available, and return all results. Useful to compare different methods. optimizer : callable, optional `optimizer` is a callable that accepts one argument: fun : callable The objective function to be optimized. `fun` accepts one argument, the Box-Cox transform parameter `lmbda`, and returns the negative log-likelihood function at the provided value. The job of `optimizer` is to find the value of `lmbda` that minimizes `fun`. and returns an object, such as an instance of `scipy.optimize.OptimizeResult`, which holds the optimal value of `lmbda` in an attribute `x`. See the example below or the documentation of `scipy.optimize.minimize_scalar` for more information. 
Returns ------- maxlog : float or ndarray The optimal transform parameter found. An array instead of a scalar for ``method='all'``. See Also -------- boxcox, boxcox_llf, boxcox_normplot, scipy.optimize.minimize_scalar Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt We can generate some data and determine the optimal ``lmbda`` in various ways: >>> rng = np.random.default_rng() >>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5 >>> y, lmax_mle = stats.boxcox(x) >>> lmax_pearsonr = stats.boxcox_normmax(x) >>> lmax_mle 1.4613865614008015 >>> lmax_pearsonr 1.6685004886804342 >>> stats.boxcox_normmax(x, method='all') array([1.66850049, 1.46138656]) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax) >>> ax.axvline(lmax_mle, color='r') >>> ax.axvline(lmax_pearsonr, color='g', ls='--') >>> plt.show() Alternatively, we can define our own `optimizer` function. Suppose we are only interested in values of `lmbda` on the interval [6, 7], we want to use `scipy.optimize.minimize_scalar` with ``method='bounded'``, and we want to use tighter tolerances when optimizing the log-likelihood function. To do this, we define a function that accepts positional argument `fun` and uses `scipy.optimize.minimize_scalar` to minimize `fun` subject to the provided bounds and tolerances: >>> from scipy import optimize >>> options = {'xatol': 1e-12} # absolute tolerance on `x` >>> def optimizer(fun): ... return optimize.minimize_scalar(fun, bounds=(6, 7), ... method="bounded", options=options) >>> stats.boxcox_normmax(x, optimizer=optimizer) 6.000... """ # If optimizer is not given, define default 'brent' optimizer. if optimizer is None: # Set default value for `brack`. if brack is None: brack = (-2.0, 2.0) def _optimizer(func, args): return optimize.brent(func, args=args, brack=brack) # Otherwise check optimizer. else: if not callable(optimizer): raise ValueError("`optimizer` must be a callable") if brack is not None: raise ValueError("`brack` must be None if `optimizer` is given") # `optimizer` is expected to return a `OptimizeResult` object, we here # get the solution to the optimization problem. def _optimizer(func, args): def func_wrapped(x): return func(x, *args) return getattr(optimizer(func_wrapped), 'x', None) def _pearsonr(x): osm_uniform = _calc_uniform_order_statistic_medians(len(x)) xvals = distributions.norm.ppf(osm_uniform) def _eval_pearsonr(lmbda, xvals, samps): # This function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) and # returns ``1 - r`` so that a minimization function maximizes the # correlation. y = boxcox(samps, lmbda) yvals = np.sort(y) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return _optimizer(_eval_pearsonr, args=(xvals, x)) def _mle(x): def _eval_mle(lmb, data): # function to minimize return -boxcox_llf(lmb, data) return _optimizer(_eval_mle, args=(x,)) def _all(x): maxlog = np.empty(2, dtype=float) maxlog[0] = _pearsonr(x) maxlog[1] = _mle(x) return maxlog methods = {'pearsonr': _pearsonr, 'mle': _mle, 'all': _all} if method not in methods.keys(): raise ValueError("Method %s not recognized." 
% method) optimfunc = methods[method] res = optimfunc(x) if res is None: message = ("`optimizer` must return an object containing the optimal " "`lmbda` in attribute `x`") raise ValueError(message) return res def _normplot(method, x, la, lb, plot=None, N=80): """Compute parameters for a Box-Cox or Yeo-Johnson normality plot, optionally show it. See `boxcox_normplot` or `yeojohnson_normplot` for details. """ if method == 'boxcox': title = 'Box-Cox Normality Plot' transform_func = boxcox else: title = 'Yeo-Johnson Normality Plot' transform_func = yeojohnson x = np.asarray(x) if x.size == 0: return x if lb <= la: raise ValueError("`lb` has to be larger than `la`.") lmbdas = np.linspace(la, lb, num=N) ppcc = lmbdas * 0.0 for i, val in enumerate(lmbdas): # Determine for each lmbda the square root of correlation coefficient # of transformed x z = transform_func(x, lmbda=val) _, (_, _, r) = probplot(z, dist='norm', fit=True) ppcc[i] = r if plot is not None: plot.plot(lmbdas, ppcc, 'x') _add_axis_labels_title(plot, xlabel='$\\lambda$', ylabel='Prob Plot Corr. Coef.', title=title) return lmbdas, ppcc def boxcox_normplot(x, la, lb, plot=None, N=80): """Compute parameters for a Box-Cox normality plot, optionally show it. A Box-Cox normality plot shows graphically what the best transformation parameter is to use in `boxcox` to obtain a distribution that is close to normal. Parameters ---------- x : array_like Input array. la, lb : scalar The lower and upper bounds for the ``lmbda`` values to pass to `boxcox` for Box-Cox transformations. These are also the limits of the horizontal axis of the plot if that is generated. plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `la` to `lb`). Returns ------- lmbdas : ndarray The ``lmbda`` values for which a Box-Cox transform was done. ppcc : ndarray Probability Plot Correlelation Coefficient, as obtained from `probplot` when fitting the Box-Cox transformed input `x` against a normal distribution. See Also -------- probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max Notes ----- Even if `plot` is given, the figure is not shown or saved by `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt Generate some non-normally distributed data, and create a Box-Cox plot: >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax) Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in the same plot: >>> _, maxlog = stats.boxcox(x) >>> ax.axvline(maxlog, color='r') >>> plt.show() """ return _normplot('boxcox', x, la, lb, plot, N) def yeojohnson(x, lmbda=None): r"""Return a dataset transformed by a Yeo-Johnson power transformation. Parameters ---------- x : ndarray Input array. Should be 1-dimensional. lmbda : float, optional If ``lmbda`` is ``None``, find the lambda that maximizes the log-likelihood function and return it as the second output argument. Otherwise the transformation is done for the given value. Returns ------- yeojohnson: ndarray Yeo-Johnson power transformed array. 
maxlog : float, optional If the `lmbda` parameter is None, the second returned argument is the lambda that maximizes the log-likelihood function. See Also -------- probplot, yeojohnson_normplot, yeojohnson_normmax, yeojohnson_llf, boxcox Notes ----- The Yeo-Johnson transform is given by:: y = ((x + 1)**lmbda - 1) / lmbda, for x >= 0, lmbda != 0 log(x + 1), for x >= 0, lmbda = 0 -((-x + 1)**(2 - lmbda) - 1) / (2 - lmbda), for x < 0, lmbda != 2 -log(-x + 1), for x < 0, lmbda = 2 Unlike `boxcox`, `yeojohnson` does not require the input data to be positive. .. versionadded:: 1.2.0 References ---------- I. Yeo and R.A. Johnson, "A New Family of Power Transformations to Improve Normality or Symmetry", Biometrika 87.4 (2000): Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt We generate some random variates from a non-normal distribution and make a probability plot for it, to show it is non-normal in the tails: >>> fig = plt.figure() >>> ax1 = fig.add_subplot(211) >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) >>> ax1.set_xlabel('') >>> ax1.set_title('Probplot against normal distribution') We now use `yeojohnson` to transform the data so it's closest to normal: >>> ax2 = fig.add_subplot(212) >>> xt, lmbda = stats.yeojohnson(x) >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) >>> ax2.set_title('Probplot after Yeo-Johnson transformation') >>> plt.show() """ x = np.asarray(x) if x.size == 0: return x if np.issubdtype(x.dtype, np.complexfloating): raise ValueError('Yeo-Johnson transformation is not defined for ' 'complex numbers.') if np.issubdtype(x.dtype, np.integer): x = x.astype(np.float64, copy=False) if lmbda is not None: return _yeojohnson_transform(x, lmbda) # if lmbda=None, find the lmbda that maximizes the log-likelihood function. lmax = yeojohnson_normmax(x) y = _yeojohnson_transform(x, lmax) return y, lmax def _yeojohnson_transform(x, lmbda): """Returns `x` transformed by the Yeo-Johnson power transform with given parameter `lmbda`. """ out = np.zeros_like(x) pos = x >= 0 # binary mask # when x >= 0 if abs(lmbda) < np.spacing(1.): out[pos] = np.log1p(x[pos]) else: # lmbda != 0 out[pos] = (np.power(x[pos] + 1, lmbda) - 1) / lmbda # when x < 0 if abs(lmbda - 2) > np.spacing(1.): out[~pos] = -(np.power(-x[~pos] + 1, 2 - lmbda) - 1) / (2 - lmbda) else: # lmbda == 2 out[~pos] = -np.log1p(-x[~pos]) return out def yeojohnson_llf(lmb, data): r"""The yeojohnson log-likelihood function. Parameters ---------- lmb : scalar Parameter for Yeo-Johnson transformation. See `yeojohnson` for details. data : array_like Data to calculate Yeo-Johnson log-likelihood for. If `data` is multi-dimensional, the log-likelihood is calculated along the first axis. Returns ------- llf : float Yeo-Johnson log-likelihood of `data` given `lmb`. See Also -------- yeojohnson, probplot, yeojohnson_normplot, yeojohnson_normmax Notes ----- The Yeo-Johnson log-likelihood function is defined here as .. math:: llf = -N/2 \log(\hat{\sigma}^2) + (\lambda - 1) \sum_i \text{ sign }(x_i)\log(|x_i| + 1) where :math:`\hat{\sigma}^2` is estimated variance of the the Yeo-Johnson transformed input data ``x``. .. 
versionadded:: 1.2.0 Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes Generate some random variates and calculate Yeo-Johnson log-likelihood values for them for a range of ``lmbda`` values: >>> x = stats.loggamma.rvs(5, loc=10, size=1000) >>> lmbdas = np.linspace(-2, 10) >>> llf = np.zeros(lmbdas.shape, dtype=float) >>> for ii, lmbda in enumerate(lmbdas): ... llf[ii] = stats.yeojohnson_llf(lmbda, x) Also find the optimal lmbda value with `yeojohnson`: >>> x_most_normal, lmbda_optimal = stats.yeojohnson(x) Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a horizontal line to check that that's really the optimum: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(lmbdas, llf, 'b.-') >>> ax.axhline(stats.yeojohnson_llf(lmbda_optimal, x), color='r') >>> ax.set_xlabel('lmbda parameter') >>> ax.set_ylabel('Yeo-Johnson log-likelihood') Now add some probability plots to show that where the log-likelihood is maximized the data transformed with `yeojohnson` looks closest to normal: >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): ... xt = stats.yeojohnson(x, lmbda=lmbda) ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') ... ax_inset.set_xticklabels([]) ... ax_inset.set_yticklabels([]) ... ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda) >>> plt.show() """ data = np.asarray(data) n_samples = data.shape[0] if n_samples == 0: return np.nan trans = _yeojohnson_transform(data, lmb) loglike = -n_samples / 2 * np.log(trans.var(axis=0)) loglike += (lmb - 1) * (np.sign(data) * np.log(np.abs(data) + 1)).sum(axis=0) return loglike def yeojohnson_normmax(x, brack=(-2, 2)): """Compute optimal Yeo-Johnson transform parameter. Compute optimal Yeo-Johnson transform parameter for input data, using maximum likelihood estimation. Parameters ---------- x : array_like Input array. brack : 2-tuple, optional The starting interval for a downhill bracket search with `optimize.brent`. Note that this is in most cases not critical; the final result is allowed to be outside this bracket. Returns ------- maxlog : float The optimal transform parameter found. See Also -------- yeojohnson, yeojohnson_llf, yeojohnson_normplot Notes ----- .. versionadded:: 1.2.0 Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt Generate some data and determine optimal ``lmbda`` >>> rng = np.random.default_rng() >>> x = stats.loggamma.rvs(5, size=30, random_state=rng) + 5 >>> lmax = stats.yeojohnson_normmax(x) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> prob = stats.yeojohnson_normplot(x, -10, 10, plot=ax) >>> ax.axvline(lmax, color='r') >>> plt.show() """ def _neg_llf(lmbda, data): return -yeojohnson_llf(lmbda, data) return optimize.brent(_neg_llf, brack=brack, args=(x,)) def yeojohnson_normplot(x, la, lb, plot=None, N=80): """Compute parameters for a Yeo-Johnson normality plot, optionally show it. A Yeo-Johnson normality plot shows graphically what the best transformation parameter is to use in `yeojohnson` to obtain a distribution that is close to normal. Parameters ---------- x : array_like Input array. la, lb : scalar The lower and upper bounds for the ``lmbda`` values to pass to `yeojohnson` for Yeo-Johnson transformations. 
These are also the limits of the horizontal axis of the plot if that is generated. plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `la` to `lb`). Returns ------- lmbdas : ndarray The ``lmbda`` values for which a Yeo-Johnson transform was done. ppcc : ndarray Probability Plot Correlelation Coefficient, as obtained from `probplot` when fitting the Box-Cox transformed input `x` against a normal distribution. See Also -------- probplot, yeojohnson, yeojohnson_normmax, yeojohnson_llf, ppcc_max Notes ----- Even if `plot` is given, the figure is not shown or saved by `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. .. versionadded:: 1.2.0 Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt Generate some non-normally distributed data, and create a Yeo-Johnson plot: >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> prob = stats.yeojohnson_normplot(x, -20, 20, plot=ax) Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in the same plot: >>> _, maxlog = stats.yeojohnson(x) >>> ax.axvline(maxlog, color='r') >>> plt.show() """ return _normplot('yeojohnson', x, la, lb, plot, N) ShapiroResult = namedtuple('ShapiroResult', ('statistic', 'pvalue')) def shapiro(x): """Perform the Shapiro-Wilk test for normality. The Shapiro-Wilk test tests the null hypothesis that the data was drawn from a normal distribution. Parameters ---------- x : array_like Array of sample data. Returns ------- statistic : float The test statistic. p-value : float The p-value for the hypothesis test. See Also -------- anderson : The Anderson-Darling test for normality kstest : The Kolmogorov-Smirnov test for goodness of fit. Notes ----- The algorithm used is described in [4]_ but censoring parameters as described are not implemented. For N > 5000 the W test statistic is accurate but the p-value may not be. The chance of rejecting the null hypothesis when it is true is close to 5% regardless of sample size. References ---------- .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for normality (complete samples), Biometrika, Vol. 52, pp. 591-611. .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk, Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of Statistical Modeling and Analytics, Vol. 2, pp. 21-33. .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4. Examples -------- >>> from scipy import stats >>> rng = np.random.default_rng() >>> x = stats.norm.rvs(loc=5, scale=3, size=100, random_state=rng) >>> shapiro_test = stats.shapiro(x) >>> shapiro_test ShapiroResult(statistic=0.9813305735588074, pvalue=0.16855233907699585) >>> shapiro_test.statistic 0.9813305735588074 >>> shapiro_test.pvalue 0.16855233907699585 """ x = np.ravel(x) N = len(x) if N < 3: raise ValueError("Data must be at least length 3.") a = zeros(N, 'f') init = 0 y = sort(x) a, w, pw, ifault = statlib.swilk(y, a[:N//2], init) if ifault not in [0, 2]: warnings.warn("Input data for shapiro has range zero. 
The results " "may not be accurate.") if N > 5000: warnings.warn("p-value may not be accurate for N > 5000.") return ShapiroResult(w, pw) # Values from Stephens, M A, "EDF Statistics for Goodness of Fit and # Some Comparisons", Journal of the American Statistical # Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737 _Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) _Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) # From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", # Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. _Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) # From Stephens, M A, "Tests of Fit for the Logistic Distribution Based # on the Empirical Distribution Function.", Biometrika, # Vol. 66, Issue 3, Dec. 1979, pp 591-595. _Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) AndersonResult = namedtuple('AndersonResult', ('statistic', 'critical_values', 'significance_level')) def anderson(x, dist='norm'): """Anderson-Darling test for data coming from a particular distribution. The Anderson-Darling test tests the null hypothesis that a sample is drawn from a population that follows a particular distribution. For the Anderson-Darling test, the critical values depend on which distribution is being tested against. This function works for normal, exponential, logistic, or Gumbel (Extreme Value Type I) distributions. Parameters ---------- x : array_like Array of sample data. dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r', 'extreme1'}, optional The type of distribution to test against. The default is 'norm'. The names 'extreme1', 'gumbel_l' and 'gumbel' are synonyms for the same distribution. Returns ------- statistic : float The Anderson-Darling test statistic. critical_values : list The critical values for this distribution. significance_level : list The significance levels for the corresponding critical values in percents. The function returns critical values for a differing set of significance levels depending on the distribution that is being tested against. See Also -------- kstest : The Kolmogorov-Smirnov test for goodness-of-fit. Notes ----- Critical values provided are for the following significance levels: normal/exponential 15%, 10%, 5%, 2.5%, 1% logistic 25%, 10%, 5%, 2.5%, 1%, 0.5% Gumbel 25%, 10%, 5%, 2.5%, 1% If the returned statistic is larger than these critical values then for the corresponding significance level, the null hypothesis that the data come from the chosen distribution can be rejected. The returned statistic is referred to as 'A2' in the references. References ---------- .. [1] https://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and Some Comparisons, Journal of the American Statistical Association, Vol. 69, pp. 730-737. .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, pp. 357-369. .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value Distribution, Biometrika, Vol. 64, pp. 583-588. .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference to Tests for Exponentiality , Technical Report No. 262, Department of Statistics, Stanford University, Stanford, CA. .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution Based on the Empirical Distribution Function, Biometrika, Vol. 66, pp. 591-595. 
""" if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l', 'gumbel_r', 'extreme1', 'logistic']: raise ValueError("Invalid distribution; dist must be 'norm', " "'expon', 'gumbel', 'extreme1' or 'logistic'.") y = sort(x) xbar = np.mean(x, axis=0) N = len(y) if dist == 'norm': s = np.std(x, ddof=1, axis=0) w = (y - xbar) / s logcdf = distributions.norm.logcdf(w) logsf = distributions.norm.logsf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3) elif dist == 'expon': w = y / xbar logcdf = distributions.expon.logcdf(w) logsf = distributions.expon.logsf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_expon / (1.0 + 0.6/N), 3) elif dist == 'logistic': def rootfunc(ab, xj, N): a, b = ab tmp = (xj - a) / b tmp2 = exp(tmp) val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N, np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N] return array(val) sol0 = array([xbar, np.std(x, ddof=1, axis=0)]) sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5) w = (y - sol[0]) / sol[1] logcdf = distributions.logistic.logcdf(w) logsf = distributions.logistic.logsf(w) sig = array([25, 10, 5, 2.5, 1, 0.5]) critical = around(_Avals_logistic / (1.0 + 0.25/N), 3) elif dist == 'gumbel_r': xbar, s = distributions.gumbel_r.fit(x) w = (y - xbar) / s logcdf = distributions.gumbel_r.logcdf(w) logsf = distributions.gumbel_r.logsf(w) sig = array([25, 10, 5, 2.5, 1]) critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) else: # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1') xbar, s = distributions.gumbel_l.fit(x) w = (y - xbar) / s logcdf = distributions.gumbel_l.logcdf(w) logsf = distributions.gumbel_l.logsf(w) sig = array([25, 10, 5, 2.5, 1]) critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) i = arange(1, N + 1) A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0) return AndersonResult(A2, critical, sig) def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N): """Compute A2akN equation 7 of Scholz and Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2aKN : float The A2aKN statistics of Scholz and Stephens 1987. """ A2akN = 0. Z_ssorted_left = Z.searchsorted(Zstar, 'left') if N == Zstar.size: lj = 1. else: lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left Bj = Z_ssorted_left + lj / 2. for i in arange(0, k): s = np.sort(samples[i]) s_ssorted_right = s.searchsorted(Zstar, side='right') Mij = s_ssorted_right.astype(float) fij = s_ssorted_right - s.searchsorted(Zstar, 'left') Mij -= fij / 2. inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.) A2akN += inner.sum() / n[i] A2akN *= (N - 1.) / N return A2akN def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): """Compute A2akN equation 6 of Scholz & Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2KN : float The A2KN statistics of Scholz and Stephens 1987. """ A2kN = 0. 
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], 'left') Bj = lj.cumsum() for i in arange(0, k): s = np.sort(samples[i]) Mij = s.searchsorted(Zstar[:-1], side='right') inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj)) A2kN += inner.sum() / n[i] return A2kN Anderson_ksampResult = namedtuple('Anderson_ksampResult', ('statistic', 'critical_values', 'significance_level')) def anderson_ksamp(samples, midrank=True): """The Anderson-Darling test for k-samples. The k-sample Anderson-Darling test is a modification of the one-sample Anderson-Darling test. It tests the null hypothesis that k-samples are drawn from the same population without having to specify the distribution function of that population. The critical values depend on the number of samples. Parameters ---------- samples : sequence of 1-D array_like Array of sample data in arrays. midrank : bool, optional Type of Anderson-Darling test which is computed. Default (True) is the midrank test applicable to continuous and discrete populations. If False, the right side empirical distribution is used. Returns ------- statistic : float Normalized k-sample Anderson-Darling test statistic. critical_values : array The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%, 0.5%, 0.1%. significance_level : float An approximate significance level at which the null hypothesis for the provided samples can be rejected. The value is floored / capped at 0.1% / 25%. Raises ------ ValueError If less than 2 samples are provided, a sample is empty, or no distinct observations are in the samples. See Also -------- ks_2samp : 2 sample Kolmogorov-Smirnov test anderson : 1 sample Anderson-Darling test Notes ----- [1]_ defines three versions of the k-sample Anderson-Darling test: one for continuous distributions and two for discrete distributions, in which ties between samples may occur. The default of this routine is to compute the version based on the midrank empirical distribution function. This test is applicable to continuous and discrete data. If midrank is set to False, the right side empirical distribution is used for a test for discrete data. According to [1]_, the two discrete test statistics differ only slightly if a few collisions due to round-off errors occur in the test not adjusted for ties between samples. The critical values corresponding to the significance levels from 0.01 to 0.25 are taken from [1]_. p-values are floored / capped at 0.1% / 25%. Since the range of critical values might be extended in future releases, it is recommended not to test ``p == 0.25``, but rather ``p >= 0.25`` (analogously for the lower bound). .. versionadded:: 0.14.0 References ---------- .. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample Anderson-Darling Tests, Journal of the American Statistical Association, Vol. 82, pp. 918-924. Examples -------- >>> from scipy import stats >>> rng = np.random.default_rng() The null hypothesis that the two random samples come from the same distribution can be rejected at the 5% level because the returned test value is greater than the critical value for 5% (1.961) but not at the 2.5% level. The interpolation gives an approximate significance level of 3.2%: >>> stats.anderson_ksamp([rng.normal(size=50), ... rng.normal(loc=0.5, size=30)]) (1.974403288713695, array([0.325, 1.226, 1.961, 2.718, 3.752, 4.592, 6.546]), 0.04991293614572478) The null hypothesis cannot be rejected for three samples from an identical distribution. 
The reported p-value (25%) has been capped and may not be very accurate (since it corresponds to the value 0.449 whereas the statistic is -0.731): >>> stats.anderson_ksamp([rng.normal(size=50), ... rng.normal(size=30), rng.normal(size=20)]) (-0.29103725200789504, array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856, 4.07210043, 5.56419101]), 0.25) """ k = len(samples) if (k < 2): raise ValueError("anderson_ksamp needs at least two samples") samples = list(map(np.asarray, samples)) Z = np.sort(np.hstack(samples)) N = Z.size Zstar = np.unique(Z) if Zstar.size < 2: raise ValueError("anderson_ksamp needs more than one distinct " "observation") n = np.array([sample.size for sample in samples]) if np.any(n == 0): raise ValueError("anderson_ksamp encountered sample without " "observations") if midrank: A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N) else: A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N) H = (1. / n).sum() hs_cs = (1. / arange(N - 1, 1, -1)).cumsum() h = hs_cs[-1] + 1 g = (hs_cs / arange(2, N)).sum() a = (4*g - 6) * (k - 1) + (10 - 6*g)*H b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6 c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h d = (2*h + 6)*k**2 - 4*h*k sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.)) m = k - 1 A2 = (A2kN - m) / math.sqrt(sigmasq) # The b_i values are the interpolation coefficients from Table 2 # of Scholz and Stephens 1987 b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326, 2.573, 3.085]) b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822, 2.364, 3.615]) b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396, -0.345, -0.154]) critical = b0 + b1 / math.sqrt(m) + b2 / m sig = np.array([0.25, 0.1, 0.05, 0.025, 0.01, 0.005, 0.001]) if A2 < critical.min(): p = sig.max() warnings.warn("p-value capped: true value larger than {}".format(p), stacklevel=2) elif A2 > critical.max(): p = sig.min() warnings.warn("p-value floored: true value smaller than {}".format(p), stacklevel=2) else: # interpolation of probit of significance level pf = np.polyfit(critical, log(sig), 2) p = math.exp(np.polyval(pf, A2)) return Anderson_ksampResult(A2, critical, p) AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue')) class _ABW: """Distribution of Ansari-Bradley W-statistic under the null hypothesis.""" # TODO: calculate exact distribution considering ties # We could avoid summing over more than half the frequencies, # but inititally it doesn't seem worth the extra complexity def __init__(self): """Minimal initializer.""" self.m = None self.n = None self.astart = None self.total = None self.freqs = None def _recalc(self, n, m): """When necessary, recalculate exact distribution.""" if n != self.n or m != self.m: self.n, self.m = n, m # distribution is NOT symmetric when m + n is odd # n is len(x), m is len(y), and ratio of scales is defined x/y astart, a1, _ = statlib.gscale(n, m) self.astart = astart # minimum value of statistic # Exact distribution of test statistic under null hypothesis # expressed as frequencies/counts/integers to maintain precision. # Stored as floats to avoid overflow of sums. self.freqs = a1.astype(np.float64) self.total = self.freqs.sum() # could calculate from m and n # probability mass is self.freqs / self.total; def pmf(self, k, n, m): """Probability mass function.""" self._recalc(n, m) # The convention here is that PMF at k = 12.5 is the same as at k = 12, # -> use `floor` in case of ties. 
ind = np.floor(k - self.astart).astype(int) return self.freqs[ind] / self.total def cdf(self, k, n, m): """Cumulative distribution function.""" self._recalc(n, m) # Null distribution derived without considering ties is # approximate. Round down to avoid Type I error. ind = np.ceil(k - self.astart).astype(int) return self.freqs[:ind+1].sum() / self.total def sf(self, k, n, m): """Survival function.""" self._recalc(n, m) # Null distribution derived without considering ties is # approximate. Round down to avoid Type I error. ind = np.floor(k - self.astart).astype(int) return self.freqs[ind:].sum() / self.total # Maintain state for faster repeat calls to ansari w/ method='exact' _abw_state = _ABW() def ansari(x, y, alternative='two-sided'): """Perform the Ansari-Bradley test for equal scale parameters. The Ansari-Bradley test ([1]_, [2]_) is a non-parametric test for the equality of the scale parameter of the distributions from which two samples were drawn. The null hypothesis states that the ratio of the scale of the distribution underlying `x` to the scale of the distribution underlying `y` is 1. Parameters ---------- x, y : array_like Arrays of sample data. alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. Default is 'two-sided'. The following options are available: * 'two-sided': the ratio of scales is not equal to 1. * 'less': the ratio of scales is less than 1. * 'greater': the ratio of scales is greater than 1. .. versionadded:: 1.7.0 Returns ------- statistic : float The Ansari-Bradley test statistic. pvalue : float The p-value of the hypothesis test. See Also -------- fligner : A non-parametric test for the equality of k variances mood : A non-parametric test for the equality of two scale parameters Notes ----- The p-value given is exact when the sample sizes are both less than 55 and there are no ties, otherwise a normal approximation for the p-value is used. References ---------- .. [1] Ansari, A. R. and Bradley, R. A. (1960) Rank-sum tests for dispersions, Annals of Mathematical Statistics, 31, 1174-1189. .. [2] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2. .. [3] Nathaniel E. Helwig "Nonparametric Dispersion and Equality Tests" at http://users.stat.umn.edu/~helwig/notes/npde-Notes.pdf Examples -------- >>> from scipy.stats import ansari >>> rng = np.random.default_rng() For these examples, we'll create three random data sets. The first two, with sizes 35 and 25, are drawn from a normal distribution with mean 0 and standard deviation 2. The third data set has size 25 and is drawn from a normal distribution with standard deviation 1.25. >>> x1 = rng.normal(loc=0, scale=2, size=35) >>> x2 = rng.normal(loc=0, scale=2, size=25) >>> x3 = rng.normal(loc=0, scale=1.25, size=25) First we apply `ansari` to `x1` and `x2`. These samples are drawn from the same distribution, so we expect the Ansari-Bradley test should not lead us to conclude that the scales of the distributions are different. >>> ansari(x1, x2) AnsariResult(statistic=541.0, pvalue=0.9762532927399098) With a p-value close to 1, we cannot conclude that there is a significant difference in the scales (as expected). Now apply the test to `x1` and `x3`: >>> ansari(x1, x3) AnsariResult(statistic=425.0, pvalue=0.0003087020407974518) The probability of observing such an extreme value of the statistic under the null hypothesis of equal scales is only 0.03087%. 
We take this as evidence against the null hypothesis in favor of the alternative: the scales of the distributions from which the samples were drawn are not equal. We can use the `alternative` parameter to perform a one-tailed test. In the above example, the scale of `x1` is greater than `x3` and so the ratio of scales of `x1` and `x3` is greater than 1. This means that the p-value when ``alternative='greater'`` should be near 0 and hence we should be able to reject the null hypothesis: >>> ansari(x1, x3, alternative='greater') AnsariResult(statistic=425.0, pvalue=0.0001543510203987259) As we can see, the p-value is indeed quite low. Use of ``alternative='less'`` should thus yield a large p-value: >>> ansari(x1, x3, alternative='less') AnsariResult(statistic=425.0, pvalue=0.9998643258449039) """ if alternative not in {'two-sided', 'greater', 'less'}: raise ValueError("'alternative' must be 'two-sided'," " 'greater', or 'less'.") x, y = asarray(x), asarray(y) n = len(x) m = len(y) if m < 1: raise ValueError("Not enough other observations.") if n < 1: raise ValueError("Not enough test observations.") N = m + n xy = r_[x, y] # combine rank = stats.rankdata(xy) symrank = amin(array((rank, N - rank + 1)), 0) AB = np.sum(symrank[:n], axis=0) uxy = unique(xy) repeats = (len(uxy) != len(xy)) exact = ((m < 55) and (n < 55) and not repeats) if repeats and (m < 55 or n < 55): warnings.warn("Ties preclude use of exact statistic.") if exact: if alternative == 'two-sided': pval = 2.0 * np.minimum(_abw_state.cdf(AB, n, m), _abw_state.sf(AB, n, m)) elif alternative == 'greater': # AB statistic is _smaller_ when ratio of scales is larger, # so this is the opposite of the usual calculation pval = _abw_state.cdf(AB, n, m) else: pval = _abw_state.sf(AB, n, m) return AnsariResult(AB, min(1.0, pval)) # otherwise compute normal approximation if N % 2: # N odd mnAB = n * (N+1.0)**2 / 4.0 / N varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2) else: mnAB = n * (N+2.0) / 4.0 varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0) if repeats: # adjust variance estimates # compute np.sum(tj * rj**2,axis=0) fac = np.sum(symrank**2, axis=0) if N % 2: # N odd varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1)) else: # N even varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1)) # Small values of AB indicate larger dispersion for the x sample. # Large values of AB indicate larger dispersion for the y sample. # This is opposite to the way we define the ratio of scales. see [1]_. z = (mnAB - AB) / sqrt(varAB) z, pval = _normtest_finish(z, alternative) return AnsariResult(AB, pval) BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue')) def bartlett(*args): """Perform Bartlett's test for equal variances. Bartlett's test tests the null hypothesis that all input samples are from populations with equal variances. For samples from significantly non-normal populations, Levene's test `levene` is more robust. Parameters ---------- sample1, sample2,... : array_like arrays of sample data. Only 1d arrays are accepted, they may have different lengths. Returns ------- statistic : float The test statistic. pvalue : float The p-value of the test. See Also -------- fligner : A non-parametric test for the equality of k variances levene : A robust parametric test for equality of k variances Notes ----- Conover et al. 
(1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power ([3]_). References ---------- .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical Tests. Proceedings of the Royal Society of London. Series A, Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282. Examples -------- Test whether or not the lists `a`, `b` and `c` come from populations with equal variances. >>> from scipy.stats import bartlett >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99] >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05] >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98] >>> stat, p = bartlett(a, b, c) >>> p 1.1254782518834628e-05 The very small p-value suggests that the populations do not have equal variances. This is not surprising, given that the sample variance of `b` is much larger than that of `a` and `c`: >>> [np.var(x, ddof=1) for x in [a, b, c]] [0.007054444444444413, 0.13073888888888888, 0.008890000000000002] """ # Handle empty input and input that is not 1d for a in args: if np.asanyarray(a).size == 0: return BartlettResult(np.nan, np.nan) if np.asanyarray(a).ndim > 1: raise ValueError('Samples must be one-dimensional.') k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = np.empty(k) ssq = np.empty(k, 'd') for j in range(k): Ni[j] = len(args[j]) ssq[j] = np.var(args[j], ddof=1) Ntot = np.sum(Ni, axis=0) spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k)) numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0) denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) - 1.0/(Ntot - k)) T = numer / denom pval = distributions.chi2.sf(T, k - 1) # 1 - cdf return BartlettResult(T, pval) LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue')) def levene(*args, center='median', proportiontocut=0.05): """Perform Levene test for equal variances. The Levene test tests the null hypothesis that all input samples are from populations with equal variances. Levene's test is an alternative to Bartlett's test `bartlett` in the case where there are significant deviations from normality. Parameters ---------- sample1, sample2, ... : array_like The sample data, possibly with different lengths. Only one-dimensional samples are accepted. center : {'mean', 'median', 'trimmed'}, optional Which function of the data to use in the test. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the test. Notes ----- Three variations of Levene's test are possible. 
The possibilities and their recommended usages are: * 'median' : Recommended for skewed (non-normal) distributions> * 'mean' : Recommended for symmetric, moderate-tailed distributions. * 'trimmed' : Recommended for heavy-tailed distributions. The test version using the mean was proposed in the original article of Levene ([2]_) while the median and trimmed mean have been studied by Brown and Forsythe ([3]_), sometimes also referred to as Brown-Forsythe test. References ---------- .. [1] https://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm .. [2] Levene, H. (1960). In Contributions to Probability and Statistics: Essays in Honor of Harold Hotelling, I. Olkin et al. eds., Stanford University Press, pp. 278-292. .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American Statistical Association, 69, 364-367 Examples -------- Test whether or not the lists `a`, `b` and `c` come from populations with equal variances. >>> from scipy.stats import levene >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99] >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05] >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98] >>> stat, p = levene(a, b, c) >>> p 0.002431505967249681 The small p-value suggests that the populations do not have equal variances. This is not surprising, given that the sample variance of `b` is much larger than that of `a` and `c`: >>> [np.var(x, ddof=1) for x in [a, b, c]] [0.007054444444444413, 0.13073888888888888, 0.008890000000000002] """ if center not in ['mean', 'median', 'trimmed']: raise ValueError("center must be 'mean', 'median' or 'trimmed'.") k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") # check for 1d input for j in range(k): if np.asanyarray(args[j]).ndim > 1: raise ValueError('Samples must be one-dimensional.') Ni = np.empty(k) Yci = np.empty(k, 'd') if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(np.sort(arg), proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) for j in range(k): Ni[j] = len(args[j]) Yci[j] = func(args[j]) Ntot = np.sum(Ni, axis=0) # compute Zij's Zij = [None] * k for i in range(k): Zij[i] = abs(asarray(args[i]) - Yci[i]) # compute Zbari Zbari = np.empty(k, 'd') Zbar = 0.0 for i in range(k): Zbari[i] = np.mean(Zij[i], axis=0) Zbar += Zbari[i] * Ni[i] Zbar /= Ntot numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0) # compute denom_variance dvar = 0.0 for i in range(k): dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0) denom = (k - 1.0) * dvar W = numer / denom pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf return LeveneResult(W, pval) def binom_test(x, n=None, p=0.5, alternative='two-sided'): """Perform a test that the probability of success is p. Note: `binom_test` is deprecated; it is recommended that `binomtest` be used instead. This is an exact, two-sided test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Parameters ---------- x : int or array_like The number of successes, or if x has length 2, it is the number of successes and the number of failures. n : int The number of trials. This is ignored if x gives both the number of successes and failures. p : float, optional The hypothesized probability of success. ``0 <= p <= 1``. The default value is ``p = 0.5``. 
alternative : {'two-sided', 'greater', 'less'}, optional Indicates the alternative hypothesis. The default value is 'two-sided'. Returns ------- p-value : float The p-value of the hypothesis test. References ---------- .. [1] https://en.wikipedia.org/wiki/Binomial_test Examples -------- >>> from scipy import stats A car manufacturer claims that no more than 10% of their cars are unsafe. 15 cars are inspected for safety, 3 were found to be unsafe. Test the manufacturer's claim: >>> stats.binom_test(3, n=15, p=0.1, alternative='greater') 0.18406106910639114 The null hypothesis cannot be rejected at the 5% level of significance because the returned p-value is greater than the critical value of 5%. """ x = atleast_1d(x).astype(np.int_) if len(x) == 2: n = x[1] + x[0] x = x[0] elif len(x) == 1: x = x[0] if n is None or n < x: raise ValueError("n must be >= x") n = np.int_(n) else: raise ValueError("Incorrect length for x.") if (p > 1.0) or (p < 0.0): raise ValueError("p must be in range [0,1]") if alternative not in ('two-sided', 'less', 'greater'): raise ValueError("alternative not recognized\n" "should be 'two-sided', 'less' or 'greater'") if alternative == 'less': pval = distributions.binom.cdf(x, n, p) return pval if alternative == 'greater': pval = distributions.binom.sf(x-1, n, p) return pval # if alternative was neither 'less' nor 'greater', then it's 'two-sided' d = distributions.binom.pmf(x, n, p) rerr = 1 + 1e-7 if x == p * n: # special case as shortcut, would also be handled by `else` below pval = 1. elif x < p * n: i = np.arange(np.ceil(p * n), n+1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(x, n, p) + distributions.binom.sf(n - y, n, p)) else: i = np.arange(np.floor(p*n) + 1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(y-1, n, p) + distributions.binom.sf(x-1, n, p)) return min(1.0, pval) def _apply_func(x, g, func): # g is list of indices into x # separating x into different groups # func should be applied over the groups g = unique(r_[0, g, len(x)]) output = [func(x[g[k]:g[k+1]]) for k in range(len(g) - 1)] return asarray(output) FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue')) def fligner(*args, center='median', proportiontocut=0.05): """Perform Fligner-Killeen test for equality of variance. Fligner's test tests the null hypothesis that all input samples are from populations with equal variances. Fligner-Killeen's test is distribution free when populations are identical [2]_. Parameters ---------- sample1, sample2, ... : array_like Arrays of sample data. Need not be the same length. center : {'mean', 'median', 'trimmed'}, optional Keyword argument controlling which function of the data is used in computing the test statistic. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the hypothesis test. See Also -------- bartlett : A parametric test for equality of k variances in normal samples levene : A robust parametric test for equality of k variances Notes ----- As with Levene's test there are three variants of Fligner's test that differ by the measure of central tendency used in the test. See `levene` for more information. Conover et al. 
(1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power [3]_. References ---------- .. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. https://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample tests for scale. 'Journal of the American Statistical Association.' 71(353), 210-213. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A comparative study of tests for homogeneity of variances, with applications to the outer continental shelf biding data. Technometrics, 23(4), 351-361. Examples -------- Test whether or not the lists `a`, `b` and `c` come from populations with equal variances. >>> from scipy.stats import fligner >>> a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99] >>> b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05] >>> c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98] >>> stat, p = fligner(a, b, c) >>> p 0.00450826080004775 The small p-value suggests that the populations do not have equal variances. This is not surprising, given that the sample variance of `b` is much larger than that of `a` and `c`: >>> [np.var(x, ddof=1) for x in [a, b, c]] [0.007054444444444413, 0.13073888888888888, 0.008890000000000002] """ if center not in ['mean', 'median', 'trimmed']: raise ValueError("center must be 'mean', 'median' or 'trimmed'.") # Handle empty input for a in args: if np.asanyarray(a).size == 0: return FlignerResult(np.nan, np.nan) k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) Ni = asarray([len(args[j]) for j in range(k)]) Yci = asarray([func(args[j]) for j in range(k)]) Ntot = np.sum(Ni, axis=0) # compute Zij's Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)] allZij = [] g = [0] for i in range(k): allZij.extend(list(Zij[i])) g.append(len(allZij)) ranks = stats.rankdata(allZij) a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5) # compute Aibar Aibar = _apply_func(a, g, np.sum) / Ni anbar = np.mean(a, axis=0) varsq = np.var(a, axis=0, ddof=1) Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf return FlignerResult(Xsq, pval) def mood(x, y, axis=0, alternative="two-sided"): """Perform Mood's test for equal scale parameters. Mood's two-sample test for scale parameters is a non-parametric test for the null hypothesis that two samples are drawn from the same distribution with the same scale parameter. Parameters ---------- x, y : array_like Arrays of sample data. axis : int, optional The axis along which the samples are tested. `x` and `y` can be of different length along `axis`. 
If `axis` is None, `x` and `y` are flattened and the test is done on all values in the flattened arrays. alternative : {'two-sided', 'less', 'greater'}, optional Defines the alternative hypothesis. Default is 'two-sided'. The following options are available: * 'two-sided': the scales of the distributions underlying `x` and `y` are different. * 'less': the scale of the distribution underlying `x` is less than the scale of the distribution underlying `y`. * 'greater': the scale of the distribution underlying `x` is greater than the scale of the distribution underlying `y`. .. versionadded:: 1.7.0 Returns ------- z : scalar or ndarray The z-score for the hypothesis test. For 1-D inputs a scalar is returned. p-value : scalar ndarray The p-value for the hypothesis test. See Also -------- fligner : A non-parametric test for the equality of k variances ansari : A non-parametric test for the equality of 2 variances bartlett : A parametric test for equality of k variances in normal samples levene : A parametric test for equality of k variances Notes ----- The data are assumed to be drawn from probability distributions ``f(x)`` and ``f(x/s) / s`` respectively, for some probability density function f. The null hypothesis is that ``s == 1``. For multi-dimensional arrays, if the inputs are of shapes ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the resulting z and p values will have shape ``(n0, n2, n3)``. Note that ``n1`` and ``m1`` don't have to be equal, but the other dimensions do. Examples -------- >>> from scipy import stats >>> rng = np.random.default_rng() >>> x2 = rng.standard_normal((2, 45, 6, 7)) >>> x1 = rng.standard_normal((2, 30, 6, 7)) >>> z, p = stats.mood(x1, x2, axis=1) >>> p.shape (2, 6, 7) Find the number of points where the difference in scale is not significant: >>> (p > 0.1).sum() 78 Perform the test with different scales: >>> x1 = rng.standard_normal((2, 30)) >>> x2 = rng.standard_normal((2, 35)) * 10.0 >>> stats.mood(x1, x2, axis=1) (array([-5.76174136, -6.12650783]), array([8.32505043e-09, 8.98287869e-10])) """ x = np.asarray(x, dtype=float) y = np.asarray(y, dtype=float) if axis is None: x = x.flatten() y = y.flatten() axis = 0 if axis < 0: axis = x.ndim + axis # Determine shape of the result arrays res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis]) if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if ax != axis])): raise ValueError("Dimensions of x and y on all axes except `axis` " "should match") n = x.shape[axis] m = y.shape[axis] N = m + n if N < 3: raise ValueError("Not enough observations.") xy = np.concatenate((x, y), axis=axis) if axis != 0: xy = np.rollaxis(xy, axis) xy = xy.reshape(xy.shape[0], -1) # Generalized to the n-dimensional case by adding the axis argument, and # using for loops, since rankdata is not vectorized. For improving # performance consider vectorizing rankdata function. all_ranks = np.empty_like(xy) for j in range(xy.shape[1]): all_ranks[:, j] = stats.rankdata(xy[:, j]) Ri = all_ranks[:n] M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0) # Approx stat. 
mnM = n * (N * N - 1.0) / 12 varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180 z = (M - mnM) / sqrt(varM) z, pval = _normtest_finish(z, alternative) if res_shape == (): # Return scalars, not 0-D arrays z = z[0] pval = pval[0] else: z.shape = res_shape pval.shape = res_shape return z, pval WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue')) def wilcoxon(x, y=None, zero_method="wilcox", correction=False, alternative="two-sided", mode='auto'): """Calculate the Wilcoxon signed-rank test. The Wilcoxon signed-rank test tests the null hypothesis that two related paired samples come from the same distribution. In particular, it tests whether the distribution of the differences x - y is symmetric about zero. It is a non-parametric version of the paired T-test. Parameters ---------- x : array_like Either the first set of measurements (in which case ``y`` is the second set of measurements), or the differences between two sets of measurements (in which case ``y`` is not to be specified.) Must be one-dimensional. y : array_like, optional Either the second set of measurements (if ``x`` is the first set of measurements), or not specified (if ``x`` is the differences between two sets of measurements.) Must be one-dimensional. zero_method : {"pratt", "wilcox", "zsplit"}, optional The following options are available (default is "wilcox"): * "pratt": Includes zero-differences in the ranking process, but drops the ranks of the zeros, see [4]_, (more conservative). * "wilcox": Discards all zero-differences, the default. * "zsplit": Includes zero-differences in the ranking process and split the zero rank between positive and negative ones. correction : bool, optional If True, apply continuity correction by adjusting the Wilcoxon rank statistic by 0.5 towards the mean value when computing the z-statistic if a normal approximation is used. Default is False. alternative : {"two-sided", "greater", "less"}, optional The alternative hypothesis to be tested, see Notes. Default is "two-sided". mode : {"auto", "exact", "approx"} Method to calculate the p-value, see Notes. Default is "auto". Returns ------- statistic : float If ``alternative`` is "two-sided", the sum of the ranks of the differences above or below zero, whichever is smaller. Otherwise the sum of the ranks of the differences above zero. pvalue : float The p-value for the test depending on ``alternative`` and ``mode``. See Also -------- kruskal, mannwhitneyu Notes ----- The test has been introduced in [4]_. Given n independent samples (xi, yi) from a bivariate distribution (i.e. paired samples), it computes the differences di = xi - yi. One assumption of the test is that the differences are symmetric, see [2]_. The two-sided test has the null hypothesis that the median of the differences is zero against the alternative that it is different from zero. The one-sided test has the null hypothesis that the median is positive against the alternative that it is negative (``alternative == 'less'``), or vice versa (``alternative == 'greater.'``). To derive the p-value, the exact distribution (``mode == 'exact'``) can be used for sample sizes of up to 25. The default ``mode == 'auto'`` uses the exact distribution if there are at most 25 observations and no ties, otherwise a normal approximation is used (``mode == 'approx'``). The treatment of ties can be controlled by the parameter `zero_method`. If ``zero_method == 'pratt'``, the normal approximation is adjusted as in [5]_. A typical rule is to require that n > 20 ([2]_, p. 383). 
References ---------- .. [1] https://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test .. [2] Conover, W.J., Practical Nonparametric Statistics, 1971. .. [3] Pratt, J.W., Remarks on Zeros and Ties in the Wilcoxon Signed Rank Procedures, Journal of the American Statistical Association, Vol. 54, 1959, pp. 655-667. :doi:`10.1080/01621459.1959.10501526` .. [4] Wilcoxon, F., Individual Comparisons by Ranking Methods, Biometrics Bulletin, Vol. 1, 1945, pp. 80-83. :doi:`10.2307/3001968` .. [5] Cureton, E.E., The Normal Approximation to the Signed-Rank Sampling Distribution When Zero Differences are Present, Journal of the American Statistical Association, Vol. 62, 1967, pp. 1068-1069. :doi:`10.1080/01621459.1967.10500917` Examples -------- In [4]_, the differences in height between cross- and self-fertilized corn plants is given as follows: >>> d = [6, 8, 14, 16, 23, 24, 28, 29, 41, -48, 49, 56, 60, -67, 75] Cross-fertilized plants appear to be be higher. To test the null hypothesis that there is no height difference, we can apply the two-sided test: >>> from scipy.stats import wilcoxon >>> w, p = wilcoxon(d) >>> w, p (24.0, 0.041259765625) Hence, we would reject the null hypothesis at a confidence level of 5%, concluding that there is a difference in height between the groups. To confirm that the median of the differences can be assumed to be positive, we use: >>> w, p = wilcoxon(d, alternative='greater') >>> w, p (96.0, 0.0206298828125) This shows that the null hypothesis that the median is negative can be rejected at a confidence level of 5% in favor of the alternative that the median is greater than zero. The p-values above are exact. Using the normal approximation gives very similar values: >>> w, p = wilcoxon(d, mode='approx') >>> w, p (24.0, 0.04088813291185591) Note that the statistic changed to 96 in the one-sided case (the sum of ranks of positive differences) whereas it is 24 in the two-sided case (the minimum of sum of ranks above and below zero). """ if mode not in ["auto", "approx", "exact"]: raise ValueError("mode must be either 'auto', 'approx' or 'exact'") if zero_method not in ["wilcox", "pratt", "zsplit"]: raise ValueError("Zero method must be either 'wilcox' " "or 'pratt' or 'zsplit'") if alternative not in ["two-sided", "less", "greater"]: raise ValueError("Alternative must be either 'two-sided', " "'greater' or 'less'") if y is None: d = asarray(x) if d.ndim > 1: raise ValueError('Sample x must be one-dimensional.') else: x, y = map(asarray, (x, y)) if x.ndim > 1 or y.ndim > 1: raise ValueError('Samples x and y must be one-dimensional.') if len(x) != len(y): raise ValueError('The samples x and y must have the same length.') d = x - y if mode == "auto": if len(d) <= 25: mode = "exact" else: mode = "approx" n_zero = np.sum(d == 0) if n_zero > 0 and mode == "exact": mode = "approx" warnings.warn("Exact p-value calculation does not work if there are " "ties. Switching to normal approximation.") if mode == "approx": if zero_method in ["wilcox", "pratt"]: if n_zero == len(d): raise ValueError("zero_method 'wilcox' and 'pratt' do not " "work if x - y is zero for all elements.") if zero_method == "wilcox": # Keep all non-zero differences d = compress(np.not_equal(d, 0), d) count = len(d) if count < 10 and mode == "approx": warnings.warn("Sample size too small for normal approximation.") r = stats.rankdata(abs(d)) r_plus = np.sum((d > 0) * r) r_minus = np.sum((d < 0) * r) if zero_method == "zsplit": r_zero = np.sum((d == 0) * r) r_plus += r_zero / 2. r_minus += r_zero / 2. 
# return min for two-sided test, but r_plus for one-sided test # the literature is not consistent here # r_plus is more informative since r_plus + r_minus = count*(count+1)/2, # i.e. the sum of the ranks, so r_minus and the min can be inferred # (If alternative='pratt', r_plus + r_minus = count*(count+1)/2 - r_zero.) # [3] uses the r_plus for the one-sided test, keep min for two-sided test # to keep backwards compatibility if alternative == "two-sided": T = min(r_plus, r_minus) else: T = r_plus if mode == "approx": mn = count * (count + 1.) * 0.25 se = count * (count + 1.) * (2. * count + 1.) if zero_method == "pratt": r = r[d != 0] # normal approximation needs to be adjusted, see Cureton (1967) mn -= n_zero * (n_zero + 1.) * 0.25 se -= n_zero * (n_zero + 1.) * (2. * n_zero + 1.) replist, repnum = find_repeats(r) if repnum.size != 0: # Correction for repeated elements. se -= 0.5 * (repnum * (repnum * repnum - 1)).sum() se = sqrt(se / 24) # apply continuity correction if applicable d = 0 if correction: if alternative == "two-sided": d = 0.5 * np.sign(T - mn) elif alternative == "less": d = -0.5 else: d = 0.5 # compute statistic and p-value using normal approximation z = (T - mn - d) / se if alternative == "two-sided": prob = 2. * distributions.norm.sf(abs(z)) elif alternative == "greater": # large T = r_plus indicates x is greater than y; i.e. # accept alternative in that case and return small p-value (sf) prob = distributions.norm.sf(z) else: prob = distributions.norm.cdf(z) elif mode == "exact": # get frequencies cnt of the possible positive ranksums r_plus cnt = _get_wilcoxon_distr(count) # note: r_plus is int (ties not allowed), need int for slices below r_plus = int(r_plus) if alternative == "two-sided": if r_plus == (len(cnt) - 1) // 2: # r_plus is the center of the distribution. prob = 1.0 else: p_less = np.sum(cnt[:r_plus + 1]) / 2**count p_greater = np.sum(cnt[r_plus:]) / 2**count prob = 2*min(p_greater, p_less) elif alternative == "greater": prob = np.sum(cnt[r_plus:]) / 2**count else: prob = np.sum(cnt[:r_plus + 1]) / 2**count return WilcoxonResult(T, prob) def median_test(*args, ties='below', correction=True, lambda_=1, nan_policy='propagate'): """Perform a Mood's median test. Test that two or more samples come from populations with the same median. Let ``n = len(args)`` be the number of samples. The "grand median" of all the data is computed, and a contingency table is formed by classifying the values in each sample as being above or below the grand median. The contingency table, along with `correction` and `lambda_`, are passed to `scipy.stats.chi2_contingency` to compute the test statistic and p-value. Parameters ---------- sample1, sample2, ... : array_like The set of samples. There must be at least two samples. Each sample must be a one-dimensional sequence containing at least one value. The samples are not required to have the same length. ties : str, optional Determines how values equal to the grand median are classified in the contingency table. The string must be one of:: "below": Values equal to the grand median are counted as "below". "above": Values equal to the grand median are counted as "above". "ignore": Values equal to the grand median are not counted. The default is "below". correction : bool, optional If True, *and* there are just two samples, apply Yates' correction for continuity when computing the test statistic associated with the contingency table. Default is True. 
lambda_ : float or str, optional By default, the statistic computed in this test is Pearson's chi-squared statistic. `lambda_` allows a statistic from the Cressie-Read power divergence family to be used instead. See `power_divergence` for details. Default is 1 (Pearson's chi-squared statistic). nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- stat : float The test statistic. The statistic that is returned is determined by `lambda_`. The default is Pearson's chi-squared statistic. p : float The p-value of the test. m : float The grand median. table : ndarray The contingency table. The shape of the table is (2, n), where n is the number of samples. The first row holds the counts of the values above the grand median, and the second row holds the counts of the values below the grand median. The table allows further analysis with, for example, `scipy.stats.chi2_contingency`, or with `scipy.stats.fisher_exact` if there are two samples, without having to recompute the table. If ``nan_policy`` is "propagate" and there are nans in the input, the return value for ``table`` is ``None``. See Also -------- kruskal : Compute the Kruskal-Wallis H-test for independent samples. mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y. Notes ----- .. versionadded:: 0.15.0 References ---------- .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill (1950), pp. 394-399. .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010). See Sections 8.12 and 10.15. Examples -------- A biologist runs an experiment in which there are three groups of plants. Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants. Each plant produces a number of seeds. The seed counts for each group are:: Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49 Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99 Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84 The following code applies Mood's median test to these samples. >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49] >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99] >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84] >>> from scipy.stats import median_test >>> stat, p, med, tbl = median_test(g1, g2, g3) The median is >>> med 34.0 and the contingency table is >>> tbl array([[ 5, 10, 7], [11, 5, 10]]) `p` is too large to conclude that the medians are not the same: >>> p 0.12609082774093244 The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to `median_test`. >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood") >>> p 0.12224779737117837 The median occurs several times in the data, so we'll get a different result if, for example, ``ties="above"`` is used: >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above") >>> p 0.063873276069553273 >>> tbl array([[ 5, 11, 9], [11, 4, 8]]) This example demonstrates that if the data set is not large and there are values equal to the median, the p-value can be sensitive to the choice of `ties`. 
""" if len(args) < 2: raise ValueError('median_test requires two or more samples.') ties_options = ['below', 'above', 'ignore'] if ties not in ties_options: raise ValueError("invalid 'ties' option '%s'; 'ties' must be one " "of: %s" % (ties, str(ties_options)[1:-1])) data = [np.asarray(arg) for arg in args] # Validate the sizes and shapes of the arguments. for k, d in enumerate(data): if d.size == 0: raise ValueError("Sample %d is empty. All samples must " "contain at least one value." % (k + 1)) if d.ndim != 1: raise ValueError("Sample %d has %d dimensions. All " "samples must be one-dimensional sequences." % (k + 1, d.ndim)) cdata = np.concatenate(data) contains_nan, nan_policy = _contains_nan(cdata, nan_policy) if contains_nan and nan_policy == 'propagate': return np.nan, np.nan, np.nan, None if contains_nan: grand_median = np.median(cdata[~np.isnan(cdata)]) else: grand_median = np.median(cdata) # When the minimum version of numpy supported by scipy is 1.9.0, # the above if/else statement can be replaced by the single line: # grand_median = np.nanmedian(cdata) # Create the contingency table. table = np.zeros((2, len(data)), dtype=np.int64) for k, sample in enumerate(data): sample = sample[~np.isnan(sample)] nabove = count_nonzero(sample > grand_median) nbelow = count_nonzero(sample < grand_median) nequal = sample.size - (nabove + nbelow) table[0, k] += nabove table[1, k] += nbelow if ties == "below": table[1, k] += nequal elif ties == "above": table[0, k] += nequal # Check that no row or column of the table is all zero. # Such a table can not be given to chi2_contingency, because it would have # a zero in the table of expected frequencies. rowsums = table.sum(axis=1) if rowsums[0] == 0: raise ValueError("All values are below the grand median (%r)." % grand_median) if rowsums[1] == 0: raise ValueError("All values are above the grand median (%r)." % grand_median) if ties == "ignore": # We already checked that each sample has at least one value, but it # is possible that all those values equal the grand median. If `ties` # is "ignore", that would result in a column of zeros in `table`. We # check for that case here. zero_cols = np.nonzero((table == 0).all(axis=0))[0] if len(zero_cols) > 0: msg = ("All values in sample %d are equal to the grand " "median (%r), so they are ignored, resulting in an " "empty sample." % (zero_cols[0] + 1, grand_median)) raise ValueError(msg) stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_, correction=correction) return stat, p, grand_median, table def _circfuncs_common(samples, high, low, nan_policy='propagate'): # Ensure samples are array-like and size is not zero samples = np.asarray(samples) if samples.size == 0: return np.nan, np.asarray(np.nan), np.asarray(np.nan), None # Recast samples as radians that range between 0 and 2 pi and calculate # the sine and cosine sin_samp = sin((samples - low)*2.*pi / (high - low)) cos_samp = cos((samples - low)*2.*pi / (high - low)) # Apply the NaN policy contains_nan, nan_policy = _contains_nan(samples, nan_policy) if contains_nan and nan_policy == 'omit': mask = np.isnan(samples) # Set the sines and cosines that are NaN to zero sin_samp[mask] = 0.0 cos_samp[mask] = 0.0 else: mask = None return samples, sin_samp, cos_samp, mask def circmean(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'): """Compute the circular mean for samples in a range. Parameters ---------- samples : array_like Input array. high : float or int, optional High boundary for circular mean range. Default is ``2*pi``. 
low : float or int, optional Low boundary for circular mean range. Default is 0. axis : int, optional Axis along which means are computed. The default is to compute the mean of the flattened array. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- circmean : float Circular mean. Examples -------- >>> from scipy.stats import circmean >>> circmean([0.1, 2*np.pi+0.2, 6*np.pi+0.3]) 0.2 >>> from scipy.stats import circmean >>> circmean([0.2, 1.4, 2.6], high = 1, low = 0) 0.4 """ samples, sin_samp, cos_samp, nmask = _circfuncs_common(samples, high, low, nan_policy=nan_policy) sin_sum = sin_samp.sum(axis=axis) cos_sum = cos_samp.sum(axis=axis) res = arctan2(sin_sum, cos_sum) mask_nan = ~np.isnan(res) if mask_nan.ndim > 0: mask = res[mask_nan] < 0 else: mask = res < 0 if mask.ndim > 0: mask_nan[mask_nan] = mask res[mask_nan] += 2*pi elif mask: res += 2*pi # Set output to NaN if no samples went into the mean if nmask is not None: if nmask.all(): res = np.full(shape=res.shape, fill_value=np.nan) else: # Find out if any of the axis that are being averaged consist # entirely of NaN. If one exists, set the result (res) to NaN nshape = 0 if axis is None else axis smask = nmask.shape[nshape] == nmask.sum(axis=axis) if smask.any(): res[smask] = np.nan return res*(high - low)/2.0/pi + low def circvar(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'): """Compute the circular variance for samples assumed to be in a range. Parameters ---------- samples : array_like Input array. high : float or int, optional High boundary for circular variance range. Default is ``2*pi``. low : float or int, optional Low boundary for circular variance range. Default is 0. axis : int, optional Axis along which variances are computed. The default is to compute the variance of the flattened array. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- circvar : float Circular variance. Notes ----- This uses a definition of circular variance that in the limit of small angles returns a number close to the 'linear' variance. Examples -------- >>> from scipy.stats import circvar >>> circvar([0, 2*np.pi/3, 5*np.pi/3]) 2.19722457734 """ samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low, nan_policy=nan_policy) if mask is None: sin_mean = sin_samp.mean(axis=axis) cos_mean = cos_samp.mean(axis=axis) else: nsum = np.asarray(np.sum(~mask, axis=axis).astype(float)) nsum[nsum == 0] = np.nan sin_mean = sin_samp.sum(axis=axis) / nsum cos_mean = cos_samp.sum(axis=axis) / nsum # hypot can go slightly above 1 due to rounding errors with np.errstate(invalid='ignore'): R = np.minimum(1, hypot(sin_mean, cos_mean)) return ((high - low)/2.0/pi)**2 * -2 * log(R) def circstd(samples, high=2*pi, low=0, axis=None, nan_policy='propagate'): """ Compute the circular standard deviation for samples assumed to be in the range [low to high]. Parameters ---------- samples : array_like Input array. high : float or int, optional High boundary for circular standard deviation range. Default is ``2*pi``. low : float or int, optional Low boundary for circular standard deviation range. Default is 0. 
axis : int, optional Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- circstd : float Circular standard deviation. Notes ----- This uses a definition of circular standard deviation that in the limit of small angles returns a number close to the 'linear' standard deviation. Examples -------- >>> from scipy.stats import circstd >>> circstd([0, 0.1*np.pi/2, 0.001*np.pi, 0.03*np.pi/2]) 0.063564063306 """ samples, sin_samp, cos_samp, mask = _circfuncs_common(samples, high, low, nan_policy=nan_policy) if mask is None: sin_mean = sin_samp.mean(axis=axis) cos_mean = cos_samp.mean(axis=axis) else: nsum = np.asarray(np.sum(~mask, axis=axis).astype(float)) nsum[nsum == 0] = np.nan sin_mean = sin_samp.sum(axis=axis) / nsum cos_mean = cos_samp.sum(axis=axis) / nsum # hypot can go slightly above 1 due to rounding errors with np.errstate(invalid='ignore'): R = np.minimum(1, hypot(sin_mean, cos_mean)) return ((high - low)/2.0/pi) * sqrt(-2*log(R))
bsd-3-clause
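The SciPy source above documents `bartlett`, `levene` and `fligner` with the same three worked samples. As a supplementary sketch (assuming an installed SciPy and calling the public `scipy.stats` API rather than the file's internals), the three equal-variance tests can be run side by side on that data:

from scipy import stats

# Samples taken from the docstring examples above.
a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]

# Each test returns a (statistic, pvalue) pair; they differ mainly in their
# robustness to non-normality (Bartlett assumes normal populations, while
# Levene and Fligner-Killeen are more robust alternatives).
for name, test in [("bartlett", stats.bartlett),
                   ("levene", stats.levene),
                   ("fligner", stats.fligner)]:
    stat, p = test(a, b, c)
    print(f"{name:>8}: statistic={stat:.4f}, p-value={p:.6f}")

With these inputs all three tests report a small p-value, consistent with the much larger sample variance of `b` noted in the docstrings above.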
hrjn/scikit-learn
examples/decomposition/plot_pca_vs_lda.py
176
2027
""" ======================================================= Comparison of LDA and PCA 2D projection of Iris dataset ======================================================= The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour and Virginica) with 4 attributes: sepal length, sepal width, petal length and petal width. Principal Component Analysis (PCA) applied to this data identifies the combination of attributes (principal components, or directions in the feature space) that account for the most variance in the data. Here we plot the different samples on the 2 first principal components. Linear Discriminant Analysis (LDA) tries to identify attributes that account for the most variance *between classes*. In particular, LDA, in contrast to PCA, is a supervised method, using known class labels. """ print(__doc__) import matplotlib.pyplot as plt from sklearn import datasets from sklearn.decomposition import PCA from sklearn.discriminant_analysis import LinearDiscriminantAnalysis iris = datasets.load_iris() X = iris.data y = iris.target target_names = iris.target_names pca = PCA(n_components=2) X_r = pca.fit(X).transform(X) lda = LinearDiscriminantAnalysis(n_components=2) X_r2 = lda.fit(X, y).transform(X) # Percentage of variance explained for each components print('explained variance ratio (first two components): %s' % str(pca.explained_variance_ratio_)) plt.figure() colors = ['navy', 'turquoise', 'darkorange'] lw = 2 for color, i, target_name in zip(colors, [0, 1, 2], target_names): plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=target_name) plt.legend(loc='best', shadow=False, scatterpoints=1) plt.title('PCA of IRIS dataset') plt.figure() for color, i, target_name in zip(colors, [0, 1, 2], target_names): plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color, label=target_name) plt.legend(loc='best', shadow=False, scatterpoints=1) plt.title('LDA of IRIS dataset') plt.show()
bsd-3-clause
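The scikit-learn example above keeps the first two principal components for plotting. A brief follow-up sketch (illustrative only, assuming scikit-learn and NumPy are installed) shows one way to check how much of the total variance those two components actually capture before settling on ``n_components=2``:

import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA

X = datasets.load_iris().data

# Fit PCA with all four iris features retained, then inspect the cumulative
# explained-variance ratio to justify truncating to two components.
pca = PCA(n_components=4).fit(X)
cumulative = np.cumsum(pca.explained_variance_ratio_)
for k, frac in enumerate(cumulative, start=1):
    print(f"first {k} component(s) explain {frac:.3f} of the variance")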
ortylp/scipy
scipy/stats/_binned_statistic.py
17
17622
from __future__ import division, print_function, absolute_import import warnings import numpy as np from scipy._lib.six import callable from collections import namedtuple def binned_statistic(x, values, statistic='mean', bins=10, range=None): """ Compute a binned statistic for a set of data. This is a generalization of a histogram function. A histogram divides the space into bins, and returns the count of the number of points in each bin. This function allows the computation of the sum, mean, median, or other statistic of the values within each bin. Parameters ---------- x : array_like A sequence of values to be binned. values : array_like The values on which the statistic will be computed. This must be the same shape as `x`. statistic : string or callable, optional The statistic to compute (default is 'mean'). The following statistics are available: * 'mean' : compute the mean of values for points within each bin. Empty bins will be represented by NaN. * 'median' : compute the median of values for points within each bin. Empty bins will be represented by NaN. * 'count' : compute the count of points within each bin. This is identical to an unweighted histogram. `values` array is not referenced. * 'sum' : compute the sum of values for points within each bin. This is identical to a weighted histogram. * function : a user-defined function which takes a 1D array of values, and outputs a single numerical statistic. This function will be called on the values in each bin. Empty bins will be represented by function([]), or NaN if this returns an error. bins : int or sequence of scalars, optional If `bins` is an int, it defines the number of equal-width bins in the given range (10 by default). If `bins` is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. Values in `x` that are smaller than lowest bin edge are assigned to bin number 0, values beyond the highest bin are assigned to ``bins[-1]``. range : (float, float) or [(float, float)], optional The lower and upper range of the bins. If not provided, range is simply ``(x.min(), x.max())``. Values outside the range are ignored. Returns ------- statistic : array The values of the selected statistic in each bin. bin_edges : array of dtype float Return the bin edges ``(length(statistic)+1)``. binnumber : 1-D ndarray of ints This assigns to each observation an integer that represents the bin in which this observation falls. Array has the same length as values. See Also -------- numpy.histogram, binned_statistic_2d, binned_statistic_dd Notes ----- All but the last (righthand-most) bin is half-open. In other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* 4. .. versionadded:: 0.11.0 Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt First a basic example: >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean', ... bins=3) (array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3])) As a second example, we now generate some random data of sailing boat speed as a function of wind speed, and then determine how fast our boat is for certain wind speeds: >>> windspeed = 8 * np.random.rand(500) >>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500) >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed, ... 
boatspeed, statistic='median', bins=[1,2,3,4,5,6,7]) >>> plt.figure() >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data') >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5, ... label='binned statistic of data') >>> plt.legend() Now we can use ``binnumber`` to select all datapoints with a windspeed below 1: >>> low_boatspeed = boatspeed[binnumber == 0] As a final example, we will use ``bin_edges`` and ``binnumber`` to make a plot of a distribution that shows the mean and distribution around that mean per bin, on top of a regular histogram and the probability distribution function: >>> x = np.linspace(0, 5, num=500) >>> x_pdf = stats.maxwell.pdf(x) >>> samples = stats.maxwell.rvs(size=10000) >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf, ... statistic='mean', bins=25) >>> bin_width = (bin_edges[1] - bin_edges[0]) >>> bin_centers = bin_edges[1:] - bin_width/2 >>> plt.figure() >>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled', alpha=0.2, ... label='histogram of data') >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf') >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2, ... label='binned statistic of data') >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5) >>> plt.legend(fontsize=10) >>> plt.show() """ try: N = len(bins) except TypeError: N = 1 if N != 1: bins = [np.asarray(bins, float)] if range is not None: if len(range) == 2: range = [range] medians, edges, xy = binned_statistic_dd([x], values, statistic, bins, range) BinnedStatisticResult = namedtuple('BinnedStatisticResult', ('statistic', 'bin_edges', 'binnumber')) return BinnedStatisticResult(medians, edges[0], xy) def binned_statistic_2d(x, y, values, statistic='mean', bins=10, range=None): """ Compute a bidimensional binned statistic for a set of data. This is a generalization of a histogram2d function. A histogram divides the space into bins, and returns the count of the number of points in each bin. This function allows the computation of the sum, mean, median, or other statistic of the values within each bin. Parameters ---------- x : (N,) array_like A sequence of values to be binned along the first dimension. y : (M,) array_like A sequence of values to be binned along the second dimension. values : (N,) array_like The values on which the statistic will be computed. This must be the same shape as `x`. statistic : string or callable, optional The statistic to compute (default is 'mean'). The following statistics are available: * 'mean' : compute the mean of values for points within each bin. Empty bins will be represented by NaN. * 'median' : compute the median of values for points within each bin. Empty bins will be represented by NaN. * 'count' : compute the count of points within each bin. This is identical to an unweighted histogram. `values` array is not referenced. * 'sum' : compute the sum of values for points within each bin. This is identical to a weighted histogram. * function : a user-defined function which takes a 1D array of values, and outputs a single numerical statistic. This function will be called on the values in each bin. Empty bins will be represented by function([]), or NaN if this returns an error. 
bins : int or [int, int] or array_like or [array, array], optional The bin specification: * the number of bins for the two dimensions (nx=ny=bins), * the number of bins in each dimension (nx, ny = bins), * the bin edges for the two dimensions (x_edges = y_edges = bins), * the bin edges in each dimension (x_edges, y_edges = bins). range : (2,2) array_like, optional The leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the `bins` parameters): [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be considered outliers and not tallied in the histogram. Returns ------- statistic : (nx, ny) ndarray The values of the selected statistic in each two-dimensional bin x_edges : (nx + 1) ndarray The bin edges along the first dimension. y_edges : (ny + 1) ndarray The bin edges along the second dimension. binnumber : 1-D ndarray of ints This assigns to each observation an integer that represents the bin in which this observation falls. Array has the same length as `values`. See Also -------- numpy.histogram2d, binned_statistic, binned_statistic_dd Notes ----- .. versionadded:: 0.11.0 """ # This code is based on np.histogram2d try: N = len(bins) except TypeError: N = 1 if N != 1 and N != 2: xedges = yedges = np.asarray(bins, float) bins = [xedges, yedges] medians, edges, xy = binned_statistic_dd([x, y], values, statistic, bins, range) BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult', ('statistic', 'x_edge', 'y_edge', 'binnumber')) return BinnedStatistic2dResult(medians, edges[0], edges[1], xy) def binned_statistic_dd(sample, values, statistic='mean', bins=10, range=None): """ Compute a multidimensional binned statistic for a set of data. This is a generalization of a histogramdd function. A histogram divides the space into bins, and returns the count of the number of points in each bin. This function allows the computation of the sum, mean, median, or other statistic of the values within each bin. Parameters ---------- sample : array_like Data to histogram passed as a sequence of D arrays of length N, or as an (N,D) array. values : array_like The values on which the statistic will be computed. This must be the same shape as x. statistic : string or callable, optional The statistic to compute (default is 'mean'). The following statistics are available: * 'mean' : compute the mean of values for points within each bin. Empty bins will be represented by NaN. * 'median' : compute the median of values for points within each bin. Empty bins will be represented by NaN. * 'count' : compute the count of points within each bin. This is identical to an unweighted histogram. `values` array is not referenced. * 'sum' : compute the sum of values for points within each bin. This is identical to a weighted histogram. * function : a user-defined function which takes a 1D array of values, and outputs a single numerical statistic. This function will be called on the values in each bin. Empty bins will be represented by function([]), or NaN if this returns an error. bins : sequence or int, optional The bin specification: * A sequence of arrays describing the bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... =bins) * The number of bins for all dimensions (nx=ny=...=bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitely in `bins`. Defaults to the minimum and maximum values along each dimension. 
Returns ------- statistic : ndarray, shape(nx1, nx2, nx3,...) The values of the selected statistic in each two-dimensional bin bin_edges : list of ndarrays A list of D arrays describing the (nxi + 1) bin edges for each dimension binnumber : 1-D ndarray of ints This assigns to each observation an integer that represents the bin in which this observation falls. Array has the same length as values. See Also -------- np.histogramdd, binned_statistic, binned_statistic_2d Notes ----- .. versionadded:: 0.11.0 """ known_stats = ['mean', 'median', 'count', 'sum', 'std'] if not callable(statistic) and statistic not in known_stats: raise ValueError('invalid statistic %r' % (statistic,)) # This code is based on np.histogramdd try: # Sample is an ND-array. N, D = sample.shape except (AttributeError, ValueError): # Sample is a sequence of 1D arrays. sample = np.atleast_2d(sample).T N, D = sample.shape nbin = np.empty(D, int) edges = D * [None] dedges = D * [None] try: M = len(bins) if M != D: raise AttributeError('The dimension of bins must be equal ' 'to the dimension of the sample x.') except TypeError: bins = D * [bins] # Select range for each dimension # Used only if number of bins is given. if range is None: smin = np.atleast_1d(np.array(sample.min(0), float)) smax = np.atleast_1d(np.array(sample.max(0), float)) else: smin = np.zeros(D) smax = np.zeros(D) for i in np.arange(D): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in np.arange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # Create edge arrays for i in np.arange(D): if np.isscalar(bins[i]): nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1) else: edges[i] = np.asarray(bins[i], float) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = np.diff(edges[i]) nbin = np.asarray(nbin) # Compute the bin number each sample falls into. Ncount = {} for i in np.arange(D): Ncount[i] = np.digitize(sample[:, i], edges[i]) # Using digitize, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right # edge to be counted in the last bin, and not as an outlier. for i in np.arange(D): # Rounding precision decimal = int(-np.log10(dedges[i].min())) + 6 # Find which points are on the rightmost edge. on_edge = np.where(np.around(sample[:, i], decimal) == np.around(edges[i][-1], decimal))[0] # Shift these points one bin to the left. Ncount[i][on_edge] -= 1 # Compute the sample indices in the flattened statistic matrix. 
ni = nbin.argsort() xy = np.zeros(N, int) for i in np.arange(0, D - 1): xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod() xy += Ncount[ni[-1]] result = np.empty(nbin.prod(), float) if statistic == 'mean': result.fill(np.nan) flatcount = np.bincount(xy, None) flatsum = np.bincount(xy, values) a = flatcount.nonzero() result[a] = flatsum[a] / flatcount[a] elif statistic == 'std': result.fill(0) flatcount = np.bincount(xy, None) flatsum = np.bincount(xy, values) flatsum2 = np.bincount(xy, values ** 2) a = flatcount.nonzero() result[a] = np.sqrt(flatsum2[a] / flatcount[a] - (flatsum[a] / flatcount[a]) ** 2) elif statistic == 'count': result.fill(0) flatcount = np.bincount(xy, None) a = np.arange(len(flatcount)) result[a] = flatcount elif statistic == 'sum': result.fill(0) flatsum = np.bincount(xy, values) a = np.arange(len(flatsum)) result[a] = flatsum elif statistic == 'median': result.fill(np.nan) for i in np.unique(xy): result[i] = np.median(values[xy == i]) elif callable(statistic): with warnings.catch_warnings(): # Numpy generates a warnings for mean/std/... with empty list warnings.filterwarnings('ignore', category=RuntimeWarning) old = np.seterr(invalid='ignore') try: null = statistic([]) except: null = np.nan np.seterr(**old) result.fill(null) for i in np.unique(xy): result[i] = statistic(values[xy == i]) # Shape into a proper matrix result = result.reshape(np.sort(nbin)) for i in np.arange(nbin.size): j = ni.argsort()[i] result = result.swapaxes(i, j) ni[i], ni[j] = ni[j], ni[i] # Remove outliers (indices 0 and -1 for each dimension). core = D * [slice(1, -1)] result = result[core] if (result.shape != nbin - 2).any(): raise RuntimeError('Internal Shape Error') BinnedStatisticddResult = namedtuple('BinnedStatisticddResult', ('statistic', 'bin_edges', 'binnumber')) return BinnedStatisticddResult(result, edges, xy)
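For reference, a brief usage sketch of the public `binned_statistic` defined above, assuming a SciPy build that exposes it as `scipy.stats.binned_statistic`; the data are synthetic and the 90th-percentile callable merely illustrates the user-defined-statistic path.

import numpy as np
from scipy import stats

x = np.random.rand(1000) * 10                      # positions to bin
values = np.sin(x) + 0.1 * np.random.randn(1000)   # noisy signal at those positions

# Built-in statistic: per-bin standard deviation.
std, edges, binnumber = stats.binned_statistic(x, values, statistic='std', bins=20)

# User-defined callable: per-bin 90th percentile, reusing the same edges.
p90, _, _ = stats.binned_statistic(x, values,
                                   statistic=lambda v: np.percentile(v, 90),
                                   bins=edges)
print(std[:3], p90[:3])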
bsd-3-clause
Djabbz/scikit-learn
examples/svm/plot_iris.py
225
3252
""" ================================================== Plot different SVM classifiers in the iris dataset ================================================== Comparison of different linear SVM classifiers on a 2D projection of the iris dataset. We only consider the first 2 features of this dataset: - Sepal length - Sepal width This example shows how to plot the decision surface for four SVM classifiers with different kernels. The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly different decision boundaries. This can be a consequence of the following differences: - ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the regular hinge loss. - ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass reduction while ``SVC`` uses the One-vs-One multiclass reduction. Both linear models have linear decision boundaries (intersecting hyperplanes) while the non-linear kernel models (polynomial or Gaussian RBF) have more flexible non-linear decision boundaries with shapes that depend on the kind of kernel and its parameters. .. NOTE:: while plotting the decision function of classifiers for toy 2D datasets can help get an intuitive understanding of their respective expressive power, be aware that those intuitions don't always generalize to more realistic high-dimensional problems. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target h = .02 # step size in the mesh # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter svc = svm.SVC(kernel='linear', C=C).fit(X, y) rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y) poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y) lin_svc = svm.LinearSVC(C=C).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel'] for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) plt.subplots_adjust(wspace=0.4, hspace=0.4) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title(titles[i]) plt.show()
bsd-3-clause
ISEF-Vision/vison_python
core/pattern/plot.py
1
2882
import matplotlib.pyplot as plt import numpy class Plot: def __init__(self, patter_recognition): self.pr = patter_recognition self.fig = plt.figure(figsize=(15,6)) self.fig.patch.set_facecolor('w') plt.ion() self.avg_plot = self.fig.add_subplot(212) self.avg_max = 15 self.line_plot = self.fig.add_subplot(211) self.line_max = 15 self.avg_plot.set_xlim([0, 3]) self.avg_plot.set_ylim([0, self.avg_max]) self.line_plot.set_xlim([0, 3]) self.line_plot.set_ylim([0, self.line_max]) self.plots = { "left_border": { "line": self.line_plot.plot([], [], color='blue', label='left_border')[0], "avg_line": self.avg_plot.plot([], [], color='blue', label='left_border')[0] }, "right_vector": { "line": self.line_plot.plot([], [], color='pink', label='right_vector')[0], "avg_line": self.avg_plot.plot([], [], color='pink', label='right_vector')[0] }, "horizon": { "line": self.line_plot.plot([], [], color='green', label='horizon')[0], "avg_line": self.avg_plot.plot([], [], color='green', label='horizon')[0] }, "right_border": { "line": self.line_plot.plot([], [], color='red', label='right_border')[0], "avg_line": self.avg_plot.plot([], [], color='red', label='right_border')[0] }, "left_vector": { "line": self.line_plot.plot([], [], color='gray', label='left_vector')[0], "avg_line": self.avg_plot.plot([], [], color='gray', label='left_vector')[0] }, } self.avg_plot.legend() self.line_plot.legend() def update(self): for key, value in self.pr.lines.items(): index = len(self.pr.lines[key]["avg_array"]) self.plots[key]["avg_line"].set_xdata(numpy.arange(index)) self.plots[key]["avg_line"].set_ydata(self.pr.lines[key]["avg_array"]) self.plots[key]["line"].set_xdata(numpy.arange(index)) self.plots[key]["line"].set_ydata(self.pr.lines[key]["array"]) if self.avg_max < self.pr.lines[key]["avg_array"][-1]: self.avg_max = self.pr.lines[key]["avg_array"][-1] + 10 self.avg_plot.set_ylim([0, self.avg_max]) if self.line_max < self.pr.lines[key]["array"][-1]: self.line_max = self.pr.lines[key]["array"][-1] + 10 self.line_plot.set_ylim([0, self.line_max]) plt.draw() plt.pause(0.01) self.avg_plot.set_xlim([0, self.pr.index + 10]) self.line_plot.set_xlim([0, self.pr.index + 10]) def save(self, name ="figure.png"): self.fig.savefig("./chart/"+name+".png")
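`Plot` assumes an external pattern-recognition object exposing `lines` (a dict of per-curve arrays) and an `index` counter; that layout is inferred from `Plot.update`, not documented by the original author. A hypothetical stub showing the expected shape:

from types import SimpleNamespace

# Hypothetical stand-in for the object passed to Plot(...).
stub = SimpleNamespace(
    index=3,
    lines={
        name: {"array": [1.0, 2.0, 3.0], "avg_array": [1.0, 1.5, 2.0]}
        for name in ("left_border", "right_vector", "horizon",
                     "right_border", "left_vector")
    },
)

# Plot(stub) would build the figure and Plot.update() would redraw it; note that
# update() relies on an interactive matplotlib backend (plt.ion / plt.pause).
print(stub.lines["horizon"]["avg_array"][-1])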
apache-2.0
saimn/glue
glue/app/qt/tests/test_terminal.py
2
2285
from __future__ import absolute_import, division, print_function from mock import MagicMock, patch from glue.tests.helpers import requires_ipython, IPYTHON_INSTALLED if IPYTHON_INSTALLED: from ..terminal import glue_terminal @requires_ipython class TestTerminal(object): def test_mpl_non_interactive(self): """IPython v0.12 sometimes turns on mpl interactive. Ensure we catch that""" import matplotlib assert not matplotlib.is_interactive() gt = glue_terminal() assert not matplotlib.is_interactive() def test_update_namespace(self): """Test that top level namespace API works without error""" gt = glue_terminal() gt.update_namespace({'x': 3}) assert 'x' in gt.namespace def test_accepts_drops(self): gt = glue_terminal() assert gt.acceptDrops() def test_drops_update_namespace(self): """DnD adds variable name to namespace""" with patch('glue.app.qt.terminal.QtWidgets.QInputDialog') as dialog: dialog.getText.return_value = 'accept_var', True gt = glue_terminal() event = MagicMock() event.mimeData().data.return_value = [5] gt.dropEvent(event) assert gt.namespace.get('accept_var') == 5 def test_cancel_drop(self): """Drop not added if user cancels dialog box""" with patch('glue.app.qt.terminal.QtWidgets.QInputDialog') as dialog: dialog.getText.return_value = 'cancel_var', False gt = glue_terminal() event = MagicMock() event.mimeData().data.return_value = [5] gt.dropEvent(event) assert 'cancel_var' not in gt.namespace def test_ignore_drag_enter(self): event = MagicMock() event.mimeData().hasFormat.return_value = False gt = glue_terminal() gt.dragEnterEvent(event) event.ignore.assert_called_once_with() def test_accept_drag_enter(self): event = MagicMock() event.mimeData().hasFormat.return_value = True gt = glue_terminal() gt.dragEnterEvent(event) event.accept.assert_called_once_with() if __name__ == "__main__": import pytest pytest.main([__file__])
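The drag-and-drop tests above rely on chained MagicMock return values; a self-contained illustration of that pattern, independent of glue or Qt, using the standard-library mock module.

from unittest.mock import MagicMock, patch

event = MagicMock()
event.mimeData().data.return_value = [5]      # configure a chained call
assert event.mimeData().data() == [5]

with patch('builtins.print') as fake_print:   # temporarily replace a callable
    print('hidden')                           # goes to the mock, not stdout
    fake_print.assert_called_once_with('hidden')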
bsd-3-clause
archonren/project
preprocess/extract_all_features.py
1
7165
import os from scipy.io import loadmat from skimage.color import rgb2gray from numpy import array, vstack, reshape, delete from skimage.feature import local_binary_pattern from skimage.feature import hog from skimage.filters import sobel_h,sobel_v from scipy.io import savemat from sklearn.metrics import precision_recall_fscore_support from sklearn.ensemble import AdaBoostClassifier from sklearn import preprocessing from sklearn.feature_selection import VarianceThreshold from sklearn.svm import SVC def normalize_data(x): scaler = preprocessing.StandardScaler().fit(x) return scaler def make_feature_vector(D, num_of_files, input_fldr, file_heading): feature = D['feature_vector'] #first 13 features target = D['target'] all_super_features = [] #to have all 18 features for n in range(num_of_files): #get r, g, b values for superpixels of the image being processed now super_image = vstack((feature[0])[n][:,k] for k in range(3)) super_image = super_image.T #this if does the following: #1. limits number of superpixels to 385 in the hope that all images have at least 385 images #Note: Case where images have less than 385 superpixels - throws error #2. The superpixels on the extreme right edge of the image is removed. This is based on an approximation that there are approximately 11 superpixels along the y-axis of the image. #This assumptions seems to be consistent throughout the training data - um, umm and uu. #3. When these superpixels are removed, their corresponding features in the feature vector and the targets in the target vector too are removed. if(super_image.shape[0] > 385): diff = super_image.shape[0] - 385 for i in range(diff): #remove from r, g, b of superpixels super_image = delete(super_image, (i+1)*11, 0) #super_image.shape[0]-1 #remove from targets ((target[:,n])[0]) = delete(((target[:,n])[0]), (i+1)*11, 0) #delete extreme rightmost column of superpixels(hopefully) #remove from feature vector (feature[:,n])[0] = delete((feature[:,n])[0], (i+1)*11, 0) #reshape the superpixel to an approximated dimension of 11*35 (This can be later automated and read from the feature vector for better performance and accuracy. Leaving this for now.) super_image = reshape(super_image,(11, 35, 3)) #convert to grayscale gray = rgb2gray(super_image) #these features are dependent on the shape of the image, i.e. image as a whole. image is reshaped for this. #60, 10 are values selected by cross-validation l = local_binary_pattern(gray, 60, 10) h_gradient = sobel_h(gray) v_gradient = sobel_v(gray) #combine all 17 features together into 1 feature_vector #The 9th(0 index) feature - "v" - does not have 385 entries (only about 100 or so; don't know why). So I am not including that. 
#, reshape((feature[0])[n][:9], (1, (feature[0])[n][:9].size)) all_features = vstack((reshape((feature[0])[n][:,0], (1, 385)), reshape((feature[0])[n][:,1], (1, 385)), reshape((feature[0])[n][:,2], (1, 385)), reshape((feature[0])[n][:,3], (1, 385)), reshape((feature[0])[n][:,4], (1, 385)), reshape((feature[0])[n][:,5], (1, 385)), reshape((feature[0])[n][:,6], (1, 385)), reshape((feature[0])[n][:,7], (1, 385)), reshape((feature[0])[n][:,8], (1, 385)), reshape((feature[0])[n][:,10], (1, 385)), reshape((feature[0])[n][:,11], (1, 385)), reshape((feature[0])[n][:,12], (1, 385)), reshape((feature[0])[n][:,13], (1, 385)), reshape((feature[0])[n][:,14], (1, 385)), reshape(l,(1, 385)), reshape(h_gradient, (1, 385)) , reshape(v_gradient, (1, 385)) )) all_features = all_features.T if n!=0: all_super_features = vstack((all_super_features, all_features)) else: all_super_features = all_features #save the new feature vector with 17 features. "v" is not included feature_explanation = ["r", "g", "b", "nr", "ng", "o1", "o2", "h", "s", "l", "a", "b", "x", "y", "texture_lbp", "h_gradient", "v_gradient"] params_dict = {} params_dict['feature_vector'] = all_super_features params_dict['feature_explanation'] = feature_explanation params_dict['target'] = target params_dict['feature_dim'] = [17] params_dict['total_pic'] = [num_of_files] save_path = os.path.join(input_fldr, "%s_data" % file_heading) savemat(save_path, params_dict) if __name__ == '__main__': #change the path; i know this is sloppy #the um_data and the other files are the files with the 15 features: (including x, y) UM = loadmat("C:\Users\Joms\Desktop\um\um_data.mat") make_feature_vector(UM, 95, "C:\Users\Joms\Desktop\um", "um_all") UMM = loadmat("C:\Users\Joms\Desktop\umm\umm_data.mat") make_feature_vector(UMM, 96, "C:\Users\Joms\Desktop\umm", "umm_all") UU = loadmat("C:\Users\Joms\Desktop\um\uu_data.mat") make_feature_vector(UU, 98, "C:\Users\Joms\Desktop\uu", "uu_all")
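A small, hedged sketch of the texture and gradient features the script stacks per superpixel, run on a synthetic 11 x 35 grayscale patch; it assumes scikit-image is installed and reuses the same (60, 10) LBP parameters chosen above.

import numpy as np
from skimage.feature import local_binary_pattern
from skimage.filters import sobel_h, sobel_v

rng = np.random.RandomState(0)
gray = rng.rand(11, 35)                    # stand-in for the reshaped superpixel grid

lbp = local_binary_pattern(gray, 60, 10)   # texture code per cell
h_grad = sobel_h(gray)                     # horizontal gradient
v_grad = sobel_v(gray)                     # vertical gradient

features = np.vstack([lbp.reshape(1, -1),
                      h_grad.reshape(1, -1),
                      v_grad.reshape(1, -1)]).T
print(features.shape)                      # (385, 3): one row per cell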
mit
alekz112/statsmodels
statsmodels/examples/ex_outliers_influence.py
34
3906
from __future__ import print_function import numpy as np import statsmodels.stats.outliers_influence as oi if __name__ == '__main__': import statsmodels.api as sm data = np.array('''\ 64 57 8 71 59 10 53 49 6 67 62 11 55 51 8 58 50 7 77 55 10 57 48 9 56 42 10 51 42 6 76 61 12 68 57 9'''.split(), float).reshape(-1,3) varnames = 'weight height age'.split() endog = data[:,0] exog = sm.add_constant(data[:,2]) res_ols = sm.OLS(endog, exog).fit() hh = (res_ols.model.exog * res_ols.model.pinv_wexog.T).sum(1) x = res_ols.model.exog hh_check = np.diag(np.dot(x, np.dot(res_ols.model.normalized_cov_params, x.T))) from numpy.testing import assert_almost_equal assert_almost_equal(hh, hh_check, decimal=13) res = res_ols #alias #http://en.wikipedia.org/wiki/PRESS_statistic #predicted residuals, leave one out predicted residuals resid_press = res.resid / (1-hh) ess_press = np.dot(resid_press, resid_press) sigma2_est = np.sqrt(res.mse_resid) #can be replace by different estimators of sigma sigma_est = np.sqrt(sigma2_est) resid_studentized = res.resid / sigma_est / np.sqrt(1 - hh) #http://en.wikipedia.org/wiki/DFFITS: dffits = resid_studentized * np.sqrt(hh / (1 - hh)) nobs, k_vars = res.model.exog.shape #Belsley, Kuh and Welsch (1980) suggest a threshold for abs(DFFITS) dffits_threshold = 2 * np.sqrt(k_vars/nobs) res_ols.df_modelwc = res_ols.df_model + 1 n_params = res.model.exog.shape[1] #http://en.wikipedia.org/wiki/Cook%27s_distance cooks_d = res.resid**2 / sigma2_est / res_ols.df_modelwc * hh / (1 - hh)**2 #or #Eubank p.93, 94 cooks_d2 = resid_studentized**2 / res_ols.df_modelwc * hh / (1 - hh) #threshold if normal, also Wikipedia from scipy import stats alpha = 0.1 #df looks wrong print(stats.f.isf(1-alpha, n_params, res.df_resid)) print(stats.f.sf(cooks_d, n_params, res.df_resid)) print('Cooks Distance') print(cooks_d) print(cooks_d2) doplot = 0 if doplot: import matplotlib.pyplot as plt fig = plt.figure() # ax = fig.add_subplot(3,1,1) # plt.plot(andrew_results.weights, 'o', label='rlm weights') # plt.legend(loc='lower left') ax = fig.add_subplot(3,1,2) plt.plot(cooks_d, 'o', label="Cook's distance") plt.legend(loc='upper left') ax2 = fig.add_subplot(3,1,3) plt.plot(resid_studentized, 'o', label='studentized_resid') plt.plot(dffits, 'o', label='DFFITS') leg = plt.legend(loc='lower left', fancybox=True) leg.get_frame().set_alpha(0.5) #, fontsize='small') ltext = leg.get_texts() # all the text.Text instance in the legend plt.setp(ltext, fontsize='small') # the legend text fontsize print(oi.reset_ramsey(res, degree=3)) #note, constant in last column for i in range(1): print(oi.variance_inflation_factor(res.model.exog, i)) infl = oi.OLSInfluence(res_ols) print(infl.resid_studentized_external) print(infl.resid_studentized_internal) print(infl.summary_table()) print(oi.summary_table(res, alpha=0.05)[0]) ''' >>> res.resid array([ 4.28571429, 4. , 0.57142857, -3.64285714, -4.71428571, 1.92857143, 10. , -6.35714286, -11. , -1.42857143, 1.71428571, 4.64285714]) >>> infl.hat_matrix_diag array([ 0.10084034, 0.11764706, 0.28571429, 0.20168067, 0.10084034, 0.16806723, 0.11764706, 0.08403361, 0.11764706, 0.28571429, 0.33613445, 0.08403361]) >>> infl.resid_press array([ 4.76635514, 4.53333333, 0.8 , -4.56315789, -5.24299065, 2.31818182, 11.33333333, -6.94036697, -12.46666667, -2. , 2.58227848, 5.06880734]) >>> infl.ess_press 465.98646628086374 '''
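The script derives leverage, Cook's distance and DFFITS by hand and then checks `OLSInfluence`; for completeness, a compact sketch of getting the same quantities directly from that class on simulated data with one planted outlier.

import numpy as np
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import OLSInfluence

rng = np.random.RandomState(0)
x = rng.uniform(0, 10, 30)
y = 2.0 + 0.5 * x + rng.normal(size=30)
y[-1] += 8.0                                   # plant one obvious outlier

res = sm.OLS(y, sm.add_constant(x)).fit()
infl = OLSInfluence(res)

print(infl.hat_matrix_diag[:3])                # leverage values
print(infl.resid_studentized_external[-1])     # large for the planted point
cooks_d, pvals = infl.cooks_distance           # returns (distances, p-values)
print(cooks_d.argmax())                        # most influential observation (very likely the planted one)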
bsd-3-clause
jiaphuan/models
research/object_detection/dataset_tools/oid_tfrecord_creation_test.py
2
7392
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for oid_tfrecord_creation.py.""" import os import contextlib2 import pandas as pd import tensorflow as tf from object_detection.dataset_tools import oid_tfrecord_creation def create_test_data(): data = { 'ImageID': ['i1', 'i1', 'i1', 'i1', 'i2', 'i2'], 'LabelName': ['a', 'a', 'b', 'b', 'b', 'c'], 'YMin': [0.3, 0.6, 0.8, 0.1, 0.0, 0.0], 'XMin': [0.1, 0.3, 0.7, 0.0, 0.1, 0.1], 'XMax': [0.2, 0.3, 0.8, 0.5, 0.9, 0.9], 'YMax': [0.3, 0.6, 1, 0.8, 0.8, 0.8], 'IsOccluded': [0, 1, 1, 0, 0, 0], 'IsTruncated': [0, 0, 0, 1, 0, 0], 'IsGroupOf': [0, 0, 0, 0, 0, 1], 'IsDepiction': [1, 0, 0, 0, 0, 0], } df = pd.DataFrame(data=data) label_map = {'a': 0, 'b': 1, 'c': 2} return label_map, df class TfExampleFromAnnotationsDataFrameTests(tf.test.TestCase): def test_simple(self): label_map, df = create_test_data() tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( df[df.ImageID == 'i1'], label_map, 'encoded_image_test') self.assertProtoEquals(""" features { feature { key: "image/encoded" value { bytes_list { value: "encoded_image_test" } } } feature { key: "image/filename" value { bytes_list { value: "i1.jpg" } } } feature { key: "image/object/bbox/ymin" value { float_list { value: [0.3, 0.6, 0.8, 0.1] } } } feature { key: "image/object/bbox/xmin" value { float_list { value: [0.1, 0.3, 0.7, 0.0] } } } feature { key: "image/object/bbox/ymax" value { float_list { value: [0.3, 0.6, 1.0, 0.8] } } } feature { key: "image/object/bbox/xmax" value { float_list { value: [0.2, 0.3, 0.8, 0.5] } } } feature { key: "image/object/class/label" value { int64_list { value: [0, 0, 1, 1] } } } feature { key: "image/object/class/text" value { bytes_list { value: ["a", "a", "b", "b"] } } } feature { key: "image/source_id" value { bytes_list { value: "i1" } } } feature { key: "image/object/depiction" value { int64_list { value: [1, 0, 0, 0] } } } feature { key: "image/object/group_of" value { int64_list { value: [0, 0, 0, 0] } } } feature { key: "image/object/occluded" value { int64_list { value: [0, 1, 1, 0] } } } feature { key: "image/object/truncated" value { int64_list { value: [0, 0, 0, 1] } } } } """, tf_example) def test_no_attributes(self): label_map, df = create_test_data() del df['IsDepiction'] del df['IsGroupOf'] del df['IsOccluded'] del df['IsTruncated'] tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( df[df.ImageID == 'i2'], label_map, 'encoded_image_test') self.assertProtoEquals(""" features { feature { key: "image/encoded" value { bytes_list { value: "encoded_image_test" } } } feature { key: "image/filename" value { bytes_list { value: "i2.jpg" } } } feature { key: "image/object/bbox/ymin" value { float_list { value: [0.0, 0.0] } } } feature { key: "image/object/bbox/xmin" value { float_list { value: [0.1, 0.1] } } } feature { key: "image/object/bbox/ymax" value { float_list { value: 
[0.8, 0.8] } } } feature { key: "image/object/bbox/xmax" value { float_list { value: [0.9, 0.9] } } } feature { key: "image/object/class/label" value { int64_list { value: [1, 2] } } } feature { key: "image/object/class/text" value { bytes_list { value: ["b", "c"] } } } feature { key: "image/source_id" value { bytes_list { value: "i2" } } } } """, tf_example) def test_label_filtering(self): label_map, df = create_test_data() label_map = {'a': 0} tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( df[df.ImageID == 'i1'], label_map, 'encoded_image_test') self.assertProtoEquals(""" features { feature { key: "image/encoded" value { bytes_list { value: "encoded_image_test" } } } feature { key: "image/filename" value { bytes_list { value: "i1.jpg" } } } feature { key: "image/object/bbox/ymin" value { float_list { value: [0.3, 0.6] } } } feature { key: "image/object/bbox/xmin" value { float_list { value: [0.1, 0.3] } } } feature { key: "image/object/bbox/ymax" value { float_list { value: [0.3, 0.6] } } } feature { key: "image/object/bbox/xmax" value { float_list { value: [0.2, 0.3] } } } feature { key: "image/object/class/label" value { int64_list { value: [0, 0] } } } feature { key: "image/object/class/text" value { bytes_list { value: ["a", "a"] } } } feature { key: "image/source_id" value { bytes_list { value: "i1" } } } feature { key: "image/object/depiction" value { int64_list { value: [1, 0] } } } feature { key: "image/object/group_of" value { int64_list { value: [0, 0] } } } feature { key: "image/object/occluded" value { int64_list { value: [0, 1] } } } feature { key: "image/object/truncated" value { int64_list { value: [0, 0] } } } } """, tf_example) class OpenOutputTfrecordsTests(tf.test.TestCase): def test_sharded_tfrecord_writes(self): with contextlib2.ExitStack() as tf_record_close_stack: output_tfrecords = oid_tfrecord_creation.open_sharded_output_tfrecords( tf_record_close_stack, os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), 10) for idx in range(10): output_tfrecords[idx].write('test_{}'.format(idx)) for idx in range(10): tf_record_path = '{}-{:05d}-of-00010'.format( os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), idx) records = list(tf.python_io.tf_record_iterator(tf_record_path)) self.assertAllEqual(records, ['test_{}'.format(idx)]) if __name__ == '__main__': tf.test.main()
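Outside of TensorFlow, the per-image slicing these tests exercise is plain pandas; a tiny sketch of that step, mirroring the column layout of `create_test_data`.

import pandas as pd

df = pd.DataFrame({
    'ImageID': ['i1', 'i1', 'i2'],
    'LabelName': ['a', 'b', 'c'],
    'XMin': [0.1, 0.7, 0.1], 'XMax': [0.2, 0.8, 0.9],
    'YMin': [0.3, 0.8, 0.0], 'YMax': [0.3, 1.0, 0.8],
})
label_map = {'a': 0, 'b': 1, 'c': 2}

for image_id, annotations in df.groupby('ImageID'):
    labels = [label_map[name] for name in annotations.LabelName]
    print(image_id, len(annotations), labels)   # one tf.Example would be built per group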
apache-2.0
aliciawyy/CompInvest
bollinger_band.py
1
2126
""" This file contains the function to compute the bollinger bands for a certain symbole. @author Alicia Wang @date 30 oct 2014 """ import datetime as dt import matplotlib.pyplot as plt import pandas.stats.moments as ts from pandas import DataFrame # Internal Import from portfolio import BasicPortfolio def compute_bollinger_band(basic_portfolio, period, source='yahoo', filename=None): """ Compute the bollinger band for a list of stocks. @param basic_portfolio: A basic portfolio instance @param period: @param source: source to get the data @param filename: @return: """ assert isinstance(basic_portfolio, BasicPortfolio) stock_close_prices = basic_portfolio.get_stock_close_prices(source) basic_portfolio.print_information() print 'Lookback period : ', period bol_mean = ts.rolling_mean(stock_close_prices, period) bol_std = ts.rolling_std(stock_close_prices, period) bollinger_band_up = bol_mean + bol_std bollinger_band_down = bol_mean - bol_std plt.clf() plt.plot(stock_close_prices.index, stock_close_prices.values) plt.plot(stock_close_prices.index, bollinger_band_up) plt.plot(stock_close_prices.index, bollinger_band_down) plt.legend(['Stock adjusted price', 'Bollinger band', 'Bollinger band']) plt.ylabel('Price') plt.xlabel('Date') if filename is not None: plt.savefig(filename, format='pdf') else: plt.show() bol_val = (stock_close_prices - bol_mean)/bol_std val = DataFrame(bol_val, index=stock_close_prices.index, columns=basic_portfolio.tickers) # print val[-5:] val.to_csv('result/bol.csv') # return the bollinger value return val def test(): """Main Function""" ls_symbols = ['AAPL', 'GOOG', 'IBM', 'MSFT'] start_date = dt.datetime(2010, 1, 1) end_date = dt.datetime(2010, 12, 31) basic_portfolio = BasicPortfolio(ls_symbols, start_date, end_date) period = 20 compute_bollinger_band(basic_portfolio, period, source='local') if __name__ == '__main__': test()
mit
robbymeals/scikit-learn
examples/linear_model/plot_polynomial_interpolation.py
251
1895
#!/usr/bin/env python
"""
========================
Polynomial interpolation
========================

This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is
n_samples x n_degree+1 and has the following form:

[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
 [1, x_2, x_2 ** 2, x_2 ** 3, ...],
 ...]

Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.

This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)

# Author: Mathieu Blondel
#         Jake Vanderplas
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline


def f(x):
    """ function to approximate by polynomial interpolation"""
    return x * np.sin(x)


# generate points used to plot
x_plot = np.linspace(0, 10, 100)

# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)

# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]

plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")

for degree in [3, 4, 5]:
    model = make_pipeline(PolynomialFeatures(degree), Ridge())
    model.fit(X, y)
    y_plot = model.predict(X_plot)
    plt.plot(x_plot, y_plot, label="degree %d" % degree)

plt.legend(loc='lower left')

plt.show()
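To make the Vandermonde analogy in the docstring concrete, a short sketch of the pseudo-feature matrix that `PolynomialFeatures` builds for a handful of 1-D points.

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

x = np.array([[1.0], [2.0], [3.0]])
print(PolynomialFeatures(degree=3).fit_transform(x))
# [[ 1.  1.  1.  1.]
#  [ 1.  2.  4.  8.]
#  [ 1.  3.  9. 27.]]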
bsd-3-clause
ralbayaty/KaggleRetina
testing/rbm_logistic_classification.py
1
3351
from __future__ import print_function print(__doc__) # Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve # License: BSD import numpy as np import matplotlib.pyplot as plt from scipy.ndimage import convolve from sklearn import linear_model, datasets, metrics from sklearn.cross_validation import train_test_split from sklearn.neural_network import BernoulliRBM from sklearn.pipeline import Pipeline ############################################################################### # Setting up def nudge_dataset(X, Y): """ This produces a dataset 5 times bigger than the original one, by moving the 8x8 images in X around by 1px to left, right, down, up """ direction_vectors = [ [[0, 1, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [1, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 1], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 1, 0]]] shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel() X = np.concatenate([X] + [np.apply_along_axis(shift, 1, X, vector) for vector in direction_vectors]) Y = np.concatenate([Y for _ in range(5)], axis=0) return X, Y # Load Data digits = datasets.load_digits() X = np.asarray(digits.data, 'float32') X, Y = nudge_dataset(X, digits.target) X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) # Models we will use logistic = linear_model.LogisticRegression() rbm = BernoulliRBM(random_state=0, verbose=True) classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)]) ############################################################################### # Training # Hyper-parameters. These were set by cross-validation, # using a GridSearchCV. Here we are not performing cross-validation to # save time. rbm.learning_rate = 0.06 rbm.n_iter = 20 # More components tend to give better prediction performance, but larger # fitting time rbm.n_components = 100 logistic.C = 6000.0 # Training RBM-Logistic Pipeline classifier.fit(X_train, Y_train) # Training Logistic regression logistic_classifier = linear_model.LogisticRegression(C=100.0) logistic_classifier.fit(X_train, Y_train) ############################################################################### # Evaluation print() print("Logistic regression using RBM features:\n%s\n" % ( metrics.classification_report( Y_test, classifier.predict(X_test)))) print("Logistic regression using raw pixel features:\n%s\n" % ( metrics.classification_report( Y_test, logistic_classifier.predict(X_test)))) ############################################################################### # Plotting plt.figure(figsize=(4.2, 4)) for i, comp in enumerate(rbm.components_): plt.subplot(10, 10, i + 1) plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r, interpolation='nearest') plt.xticks(()) plt.yticks(()) plt.suptitle('100 components extracted by RBM', fontsize=16) plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23) plt.show()
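The hyper-parameter comment above mentions that the values were chosen with GridSearchCV; a hedged sketch of how such a search could be wired up over the pipeline's step parameters, kept deliberately small because RBM training is slow (the import path is `sklearn.model_selection` in newer releases).

from sklearn import datasets, linear_model
from sklearn.grid_search import GridSearchCV   # sklearn.model_selection in newer releases
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline

digits = datasets.load_digits()
X = (digits.data - digits.data.min()) / (digits.data.max() - digits.data.min())
y = digits.target

pipe = Pipeline([('rbm', BernoulliRBM(random_state=0)),
                 ('logistic', linear_model.LogisticRegression())])

# Pipeline step parameters are addressed as <step_name>__<param_name>.
param_grid = {'rbm__n_components': [50, 100],
              'rbm__learning_rate': [0.06, 0.1],
              'logistic__C': [1.0, 6000.0]}

search = GridSearchCV(pipe, param_grid, cv=3)
search.fit(X[:300], y[:300])                   # subset only, to keep the sketch quick
print(search.best_params_)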
gpl-2.0
justacec/bokeh
examples/plotting/file/boxplot.py
3
2304
import numpy as np import pandas as pd from bokeh.plotting import figure, show, output_file # generate some synthetic time series for six different categories cats = list("abcdef") yy = np.random.randn(2000) g = np.random.choice(cats, 2000) for i, l in enumerate(cats): yy[g == l] += i // 2 df = pd.DataFrame(dict(score=yy, group=g)) # find the quartiles and IQR for each category groups = df.groupby('group') q1 = groups.quantile(q=0.25) q2 = groups.quantile(q=0.5) q3 = groups.quantile(q=0.75) iqr = q3 - q1 upper = q3 + 1.5*iqr lower = q1 - 1.5*iqr # find the outliers for each category def outliers(group): cat = group.name return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score'] out = groups.apply(outliers).dropna() # prepare outlier data for plotting, we need coordinates for every outlier. outx = [] outy = [] for cat in cats: # only add outliers if they exist if not out.loc[cat].empty: for value in out[cat]: outx.append(cat) outy.append(value) p = figure(tools="save", background_fill_color="#EFE8E2", title="", x_range=cats) # if no outliers, shrink lengths of stems to be no longer than the minimums or maximums qmin = groups.quantile(q=0.00) qmax = groups.quantile(q=1.00) upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ] lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ] # stems p.segment(cats, upper.score, cats, q3.score, line_width=2, line_color="black") p.segment(cats, lower.score, cats, q1.score, line_width=2, line_color="black") # boxes p.rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score, fill_color="#E08E79", line_width=2, line_color="black") p.rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score, fill_color="#3B8686", line_width=2, line_color="black") # whiskers (almost-0 height rects simpler than segments) p.rect(cats, lower.score, 0.2, 0.01, line_color="black") p.rect(cats, upper.score, 0.2, 0.01, line_color="black") # outliers p.circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6) p.xgrid.grid_line_color = None p.ygrid.grid_line_color = "white" p.grid.grid_line_width = 2 p.xaxis.major_label_text_font_size="12pt" output_file("boxplot.html", title="boxplot.py example") show(p)
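As a quick numeric check of the 1.5 * IQR fence used above, a pandas-only sketch on one synthetic group with two planted outliers.

import numpy as np
import pandas as pd

scores = pd.Series(np.r_[np.random.randn(200), [6.0, -5.5]])

q1, q3 = scores.quantile(0.25), scores.quantile(0.75)
iqr = q3 - q1
upper, lower = q3 + 1.5 * iqr, q1 - 1.5 * iqr

outliers = scores[(scores > upper) | (scores < lower)]
print(len(outliers), float(lower), float(upper))   # includes at least the two planted points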
bsd-3-clause
logston/plottags
setup.py
1
1160
from setuptools import setup

import plottags

with open("LICENSE") as fd:
    LICENSE = fd.read()

with open("README.rst") as fd:
    README = fd.read()

setup(
    name='plottags',
    version=plottags.__version__,
    description='A package for plotting the tag history of repositories',
    license=LICENSE,
    long_description=README,
    author=plottags.__author__,
    author_email=plottags.__email__,
    url='https://github.com/logston/plottags',
    packages=['plottags'],
    include_package_data=True,
    test_suite='tests',
    keywords=['repository', 'git', 'hg', 'mercurial', 'plot', 'tag', 'tags'],
    entry_points={
        'console_scripts': ['plottags=plottags.controller:main'],
    },
    install_requires=[
        'matplotlib>=1.4.2',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Utilities',
    ],
)
bsd-3-clause
quasiben/bokeh
bokeh/charts/builders/chord_builder.py
2
12294
"""This is the Bokeh charts interface. It gives you a high level API to build complex plot is a simple way. This is the Chord class which lets you build your Chord charts just passing the arguments to the Chart class and calling the proper functions. """ # ----------------------------------------------------------------------------- # Copyright (c) 2012 - 2016, Continuum Analytics, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- from __future__ import absolute_import import numpy as np import pandas as pd from math import cos, sin, pi from bokeh.charts.properties import Dimension from bokeh.charts.builder import create_and_build, Builder from bokeh.charts.attributes import MarkerAttr, ColorAttr, CatAttr from bokeh.charts.utils import color_in_equal_space, help from bokeh.models import Range1d from bokeh.models.glyphs import Arc, Bezier, Text from bokeh.models.renderers import GlyphRenderer from bokeh.models.sources import ColumnDataSource from bokeh.core.properties import Instance, Bool, String, Array, Float, Any, Seq, Either, Int # ----------------------------------------------------------------------------- # Classes and functions # ----------------------------------------------------------------------------- class Area: """ It represents an arc area. It will create a list of available points through the arc representing that area and then those points will be used as start and end for the beziers lines. """ def __init__(self, n_conn, start_point, end_point): # Number of connections in that arc area self.n_conn = n_conn # The start point of the arc representing the area self.start_point = start_point self.end_point = end_point # Equally spaced points between start point and end point free_points_angles = np.linspace(start_point, end_point, n_conn) # A list of available X,Y in the chart to consume by each bezier's start and end point self.free_points = [[cos(angle), sin(angle)] for angle in free_points_angles] assert self.n_conn == len(self.free_points) class ChordBuilder(Builder): """ This is the Chord builder and it is in charge of plotting Chord graphs in an easy and intuitive way. Essentially, we provide a way to ingest the data, make the proper calculations and push the references into a source object. We additionally make calculations for the ranges. And finally add the needed glyphs (markers) taking the references from the source. 
""" default_attributes = {'color': ColorAttr(), 'marker': MarkerAttr(), 'stack': CatAttr()} dimensions = ['values'] values = Dimension('values') arcs_data = Instance(ColumnDataSource) text_data = Instance(ColumnDataSource) connection_data = Instance(ColumnDataSource) origin = String() destination = String() value = Any() square_matrix = Bool() label = Seq(Any()) matrix = Array(Array(Either(Float(), Int()))) def set_ranges(self): rng = 1.1 if not self.label else 1.8 self.x_range = Range1d(-rng, rng) self.y_range = Range1d(-rng, rng) def setup(self): # Process only if not a square_matrix if not self.square_matrix: source = self.values._data[self.origin] target = self.values._data[self.destination] union = source.append(target).unique() N = union.shape[0] m = pd.DataFrame(np.zeros((N, N)), columns=union, index=union) if not self.label: self.label = list(union) if self.value is None: for _, row in self.values._data.iterrows(): m[row[self.origin]][row[self.destination]] += 1 self.matrix = m.get_values() if self.value is not None: if isinstance(self.value, int) or isinstance(self.value, float): for _, row in self.values._data.iterrows(): m[row[self.origin]][row[self.destination]] = self.value self.matrix = m.get_values() elif isinstance(self.value, str): for _, row in self.values._data.iterrows(): m[row[self.origin]][row[self.destination]] = row[self.value] self.matrix = m.get_values().T else: # It's already a square matrix self.matrix = self._data.df.get_values() if self.label: assert len(self.label) == self.matrix.shape[0] def process_data(self): weights_of_areas = (self.matrix.sum(axis=0) + self.matrix.sum(axis=1)) - self.matrix.diagonal() areas_in_radians = (weights_of_areas / weights_of_areas.sum()) * (2 * pi) # We add a zero in the begging for the cumulative sum points = np.zeros((areas_in_radians.shape[0] + 1)) points[1:] = areas_in_radians points = points.cumsum() colors = [color_in_equal_space(area / areas_in_radians.shape[0]) for area in range(areas_in_radians.shape[0])] arcs_data = pd.DataFrame({ 'start_angle': points[:-1], 'end_angle': points[1:], 'line_color': colors }) self.arcs_data = ColumnDataSource(arcs_data) # Text if self.label: text_radius = 1.1 angles = (points[:-1]+points[1:])/2.0 text_positions = pd.DataFrame({ 'angles': angles, 'text_x': np.cos(angles) * text_radius, 'text_y': np.sin(angles) * text_radius, 'text': list(self.label) }) self.text_data = ColumnDataSource(text_positions) # Lines all_areas = [] for i in range(areas_in_radians.shape[0]): all_areas.append(Area(weights_of_areas[i], points[:-1][i], points[1:][i])) all_connections = [] for j, region1 in enumerate(self.matrix): # Get the connections origin region source = all_areas[j] color = colors[j] weight = weights_of_areas[j] for k, region2 in enumerate(region1): # Get the connection destination region target = all_areas[k] for _ in range(int(region2)): p1 = source.free_points.pop() p2 = target.free_points.pop() # Get both regions free points and create a connection with the data all_connections.append(p1 + p2 + [color, weight]) connections_df = pd.DataFrame(all_connections, dtype=str) connections_df.columns = ["start_x", "start_y", "end_x", "end_y", "colors", "weight"] connections_df["cx0"] = connections_df.start_x.astype("float64")/2 connections_df["cy0"] = connections_df.start_y.astype("float64")/2 connections_df["cx1"] = connections_df.end_x.astype("float64")/2 connections_df["cy1"] = connections_df.end_y.astype("float64")/2 connections_df.weight = 
(connections_df.weight.astype("float64")/connections_df.weight.astype("float64").sum()) * 3000 self.connection_data = ColumnDataSource(connections_df) def yield_renderers(self): """Use the marker glyphs to display the arcs and beziers. Takes reference points from data loaded at the ColumnDataSource. """ beziers = Bezier(x0='start_x', y0='start_y', x1='end_x', y1='end_y', cx0='cx0', cy0='cy0', cx1='cx1', cy1='cy1', line_alpha='weight', line_color='colors') yield GlyphRenderer(data_source=self.connection_data, glyph=beziers) arcs = Arc(x=0, y=0, radius=1, line_width=10, start_angle='start_angle', end_angle='end_angle', line_color='line_color') yield GlyphRenderer(data_source=self.arcs_data, glyph=arcs) if self.label: text_props = { "text_color": "#000000", "text_font_size": "8pt", "text_align": "left", "text_baseline": "middle" } labels = Text(x='text_x', y='text_y', text='text', angle='angles', **text_props ) yield GlyphRenderer(data_source=self.text_data, glyph=labels) @help(ChordBuilder) def Chord(data, source=None, target=None, value=None, square_matrix=False, label=None, xgrid=False, ygrid=False, **kw): """ Create a chord chart using :class:`ChordBuilder <bokeh.charts.builders.chord_builder.ChordBuilder>` to render a chord graph from a variety of value forms. This chart displays the inter-relationships between data in a matrix. The data can be generated by the chart interface. Given a :class:`DataFrame <pandas.DataFrame>`, select two columns to be used as arcs with `source` and `target` attributes, passing by the name of those columns. The :class:`Chord <bokeh.charts.builders.chord_builder.Chord>` chart will then deduce the relationship between the arcs. The value of the connections can be inferred automatically by counting `source` and `target`. If you prefer you can assign a fixed value for all the connections with `value` simply passing by a number. A third option is to pass a reference to a third column in the :class:`DataFrame <pandas.DataFrame>` with the values for the connections. If you want to plot the relationships in a squared matrix, simply pass the matrix and set `square_matrix` attribute to `True`. Reference: `Chord diagram on Wikipedia <https://en.wikipedia.org/wiki/Chord_diagram>`_ Args: data (:ref:`userguide_charts_data_types`): the data source for the chart. source (list(str) or str, optional): Data source to use as origin of the connection to a destination. target (list(str) or str, optional): Data source to use as destination of a connection. value (list(num) or num, optional): The value the connection should have. square_matrix (bool, optional): If square matrix, avoid any calculations during the setup. label (list(str), optional): The labels to be put in the areas. Returns: :class:`Chart`: includes glyph renderers that generate the chord Examples: .. 
bokeh-plot:: :source-position: above import pandas as pd from bokeh.charts import Chord from bokeh.io import show, output_file from bokeh.sampledata.les_mis import data nodes = data['nodes'] links = data['links'] nodes_df = pd.DataFrame(nodes) links_df = pd.DataFrame(links) source_data = links_df.merge(nodes_df, how='left', left_on='source', right_index=True) source_data = source_data.merge(nodes_df, how='left', left_on='target', right_index=True) source_data = source_data[source_data["value"] > 5] # Select those with 5 or more connections chord_from_df = Chord(source_data, source="name_x", target="name_y", value="value") output_file('chord_from_df.html') show(chord_from_df) """ kw["origin"] = source kw["destination"] = target kw["value"] = value kw["square_matrix"] = square_matrix kw["label"] = label kw['xgrid'] = xgrid kw['ygrid'] = ygrid chart = create_and_build(ChordBuilder, data, **kw) chart.left[0].visible = False chart.below[0].visible = False chart.outline_line_color = None return chart
bsd-3-clause
akionakamura/scikit-learn
examples/linear_model/plot_sgd_separating_hyperplane.py
260
1219
""" ========================================= SGD: Maximum margin separating hyperplane ========================================= Plot the maximum margin separating hyperplane within a two-class separable dataset using a linear Support Vector Machines classifier trained using SGD. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import SGDClassifier from sklearn.datasets.samples_generator import make_blobs # we create 50 separable points X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60) # fit the model clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane xx = np.linspace(-1, 5, 10) yy = np.linspace(-1, 5, 10) X1, X2 = np.meshgrid(xx, yy) Z = np.empty(X1.shape) for (i, j), val in np.ndenumerate(X1): x1 = val x2 = X2[i, j] p = clf.decision_function([x1, x2]) Z[i, j] = p[0] levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles) plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired) plt.axis('tight') plt.show()
bsd-3-clause
davidenitti/ML
RL/agent/run.py
1
12234
''' @author: Davide Nitti ''' from . import common from . import default_params from . import torchagent from . import agent_utils from . import env_utils import time import numpy as np import gym import gym.spaces from multiprocessing import Process import logging import argparse import os import json logger = logging.getLogger(__name__) def loadparams(filename): with open(filename + ".json", "r") as input_file: out = json.load(input_file) return out def getparams(params): parser = argparse.ArgumentParser() parser.add_argument('--name_exp', default="") parser.add_argument('--res_dir', default="out_dir") parser.add_argument('--target', default="BreakoutDeterministic-v4") # LunarLander-v2 Breakout-v0 parser.add_argument('--episodes', type=int, default=1000000) parser.add_argument('--plot', action='store_true', default=True, help='plot') parser.add_argument('--render', action='store_true', help='render') parser.add_argument('--monitor', action='store_true', help='monitor') parser.add_argument('--logging', default='INFO') parser.add_argument('--no_cuda', action='store_false', dest='use_cuda', default=True, help='disable cuda') parser.add_argument('--save_mem', action='store_true', help='save memory') args = parser.parse_args(params) options = vars(args) if options["name_exp"] != "": if not os.path.exists(args.res_dir): os.makedirs(args.res_dir) options["path_exp"] = os.path.join(options["res_dir"], options["name_exp"]) else: options["path_exp"] = None if options["path_exp"] and os.path.exists(options["path_exp"] + ".json"): params = loadparams(options["path_exp"]) # only this parameters are taken from args params['monitor'] = options['monitor'] params['plot'] = options['plot'] params['render'] = options['render'] params['use_cuda'] = options['use_cuda'] params['save_mem'] = options['save_mem'] params['logging'] = options['logging'] else: params = default_params.get_default(options['target']) params.update(options) return params def start_process(func, args): p = Process(target=func, args=args) p.start() return p def upload_res(callback, process_upload=None, upload_checkpoint=False, parallel=True): if callback is None: return None print('uploading') if parallel: if process_upload is not None: process_upload.join() process_upload.terminate() process_upload = start_process(callback, (upload_checkpoint,)) else: try: callback(upload_checkpoint) except Exception as e: print(str(e)) return process_upload def main(params=[], callback=None, upload_ckp=False, numavg=100, sleep=0.0): params = getparams(params) logger.info('Params' + str(params)) if params['plot'] != True: import matplotlib matplotlib.use('pdf') else: import matplotlib # matplotlib.use('Agg') # matplotlib.use("Qt5agg") import matplotlib.pyplot as plt plt.rcParams['image.interpolation'] = 'nearest' nameenv = params['target'] reward_threshold = gym.envs.registry.spec(nameenv).reward_threshold if 'baseline_env' in params and params['baseline_env']: if params['path_exp']: stats_path = os.path.join(params["res_dir"], 'stats') if not os.path.exists(stats_path): os.makedirs(stats_path) else: stats_path = None env = env_utils.build_env(nameenv, env_type=None, num_env=1, batch=False, seed=params["seed"], reward_scale=params['scalereward'], gamestate=None, logger_dir=stats_path) reward_range = env.reward_range#env.envs[0].reward_range else: env = gym.make(nameenv) reward_range = env.reward_range if params['seed'] is not None: env.seed(params["seed"]) if params['monitor'] == True: # store performance and video from gym import wrappers env = 
wrappers.Monitor(env, os.path.join(params['res_dir'], 'video'), force=True) if params["path_exp"]: log_file = params["path_exp"] + '.log' else: log_file = None common.init_logger(log_file, params['logging']) logger.info('start RL agent') logger.info('params ' + str(params)) logger.info(str( (env.observation_space, env.action_space, 'max_episode_steps', env.spec.max_episode_steps, reward_range))) for p in params: logger.debug(p + " " + str(params[p])) if params["seed"] is not None: np.random.seed(params["seed"]) logger.debug("seed " + str(params["seed"])) try: agent = torchagent.deepQconv(env.observation_space, env.action_space, reward_range, params) num_steps = env.spec.max_episode_steps avg = None process_upload = None if params['plot']: plt.ion() totrewlist = [] test_rew_epis = [[], []] test_rew_smooth = [] total_rew_discountlist = [] testevery = 25 useConv = agent.config['conv'] max_total_rew_discount = float("-inf") max_abs_rew_discount = float("-inf") total_steps = 0 print(agent.config) if 'final_episode' in agent.config: start_episode = agent.config['final_episode'] + 1 else: start_episode = 1 for episode in range(start_episode, params['episodes']): if (episode) % testevery == 0 or episode >= params['episodes'] - numavg: is_test = True else: is_test = False if is_test: render = (params['render']) eps = -1 learn = False print(agent.config["path_exp"], 'episode', episode, 'l rate', agent.getlearnrate(), 'lambda', agent.config['lambda']) else: render = False learn = True eps = episode startt = time.time() total_rew, steps, total_rew_discount, max_qval = agent_utils.do_rollout(agent, env, eps, num_steps=num_steps, render=render, useConv=useConv, discount=agent.config["discount"], sleep=sleep, learn=learn) stopt = time.time() max_total_rew_discount = max(max_total_rew_discount, total_rew_discount) max_abs_rew_discount = max(max_abs_rew_discount, abs(total_rew_discount)) total_steps += steps if ((max_qval - max_total_rew_discount) / max_abs_rew_discount > 0.9): logger.warning("Q function too high: max rew disc {:.3f}" " max Q {:.3f} rel error {:.3f}".format( max_total_rew_discount, max_qval, (max_qval - max_total_rew_discount) / max_abs_rew_discount)) if avg is None: avg = total_rew if is_test: test_rew_epis[0].append(total_rew / agent.config['scalereward']) test_rew_epis[1].append(episode) inc = max(0.2, 0.05 + 1. 
/ (episode) ** 0.5) avg = avg * (1 - inc) + inc * total_rew test_rew_smooth.append(avg / agent.config['scalereward']) if episode % 10 == 0: print(agent.config) totrewlist.append(total_rew / agent.config['scalereward']) total_rew_discountlist.append(total_rew_discount / agent.config['scalereward']) if (episode + 1 - start_episode) % 250 == 0: if agent.config["path_exp"] is not None: print("saving...") agent.config['final_episode'] = episode if 'results' not in agent.config: agent.config['results'] = {} if 'all_reward' not in agent.config['results']: agent.config['results']['all_reward'] = [] if 'all_reward_train' not in agent.config['results']: agent.config['results']['all_reward_train'] = {} if 'ep2updates' not in agent.config['results']: agent.config['results']['ep2updates'] = {} agent.config['results']['ep2updates'][str(episode)] = agent.config['num_updates'] if 'ep2steps' not in agent.config['results']: agent.config['results']['ep2steps'] = {} agent.config['results']['ep2steps'][str(episode)] = total_steps agent.config['results']['all_reward_train'][str(episode)] = np.mean(totrewlist[-100:]) if 'all_reward_test' not in agent.config['results']: agent.config['results']['all_reward_test'] = {} agent.config['results']['all_reward_test'][str(episode)] = np.mean(test_rew_epis[0][-10:]) agent.config['results']['num_updates'] = agent.config['num_updates'] agent.config['results']['episode'] = episode agent.config['results']['test_reward'] = np.mean(test_rew_epis[0][-10:]) agent.config['results']['train_reward'] = np.mean(totrewlist[-100:]) agent.config['results']['all_reward'] = [] # fixme if process_upload is not None: process_upload.join() try: agent.save() except KeyboardInterrupt: agent.save() exit() process_upload = upload_res(callback, process_upload, upload_ckp) logger.info( "episode {} t {:.2f}=100 steps {:6} reward {:.2f} disc_rew {:.2f} avg {:.2f}, avg100 {:.2f}, eps {:.3f} " \ "updates {:8} tot-steps {:8} epoch {:.1f} lr {:.5f}".format(episode, (stopt - startt) / steps * 100., \ steps, total_rew / agent.config['scalereward'], total_rew_discount / agent.config[ 'scalereward'], avg / agent.config['scalereward'], np.mean(np.array(totrewlist[-100:])), \ agent.epsilon(eps), agent.config['num_updates'], total_steps, agent.config['num_updates'] / 50000, agent.getlearnrate())) if is_test and params['plot']: agent.plot([], (totrewlist, test_rew_smooth, test_rew_epis), reward_threshold, plt, plot=params['plot'], numplot=1, start_episode=start_episode) print(agent.config) except Exception as e: print('Exception', e) env.close() raise e except KeyboardInterrupt: pass finally: env.close() return np.mean(totrewlist[-numavg:]), agent.config, totrewlist, test_rew_smooth, test_rew_epis, reward_threshold
gpl-3.0
maxalbert/blaze
blaze/compute/tests/test_hdfstore.py
14
1791
import pytest
tables = pytest.importorskip('tables')

from blaze.compute.hdfstore import *
from blaze.utils import tmpfile
from blaze import symbol, discover, compute
import pandas as pd
from datetime import datetime
from odo import Chunks, resource, into
import os

try:
    f = pd.HDFStore('foo')
except (RuntimeError, ImportError) as e:
    pytest.skip('skipping test_hdfstore.py %s' % e)
else:
    f.close()
    os.remove('foo')


df = pd.DataFrame([['a', 1, 10., datetime(2000, 1, 1)],
                   ['ab', 2, 20., datetime(2000, 2, 2)],
                   ['abc', 3, 30., datetime(2000, 3, 3)],
                   ['abcd', 4, 40., datetime(2000, 4, 4)]],
                  columns=['name', 'a', 'b', 'time'])


def test_hdfstore():
    with tmpfile('.hdf5') as fn:
        df.to_hdf(fn, '/appendable', format='table')
        df.to_hdf(fn, '/fixed')

        hdf = resource('hdfstore://%s' % fn)
        s = symbol('s', discover(hdf))

        assert isinstance(compute(s.fixed, hdf),
                          (pd.DataFrame, pd.io.pytables.Fixed))
        assert isinstance(compute(s.appendable, hdf),
                          (pd.io.pytables.AppendableFrameTable, Chunks))

        s = symbol('s', discover(df))
        f = resource('hdfstore://%s::/fixed' % fn)
        a = resource('hdfstore://%s::/appendable' % fn)
        assert isinstance(pre_compute(s, a), Chunks)
        hdf.close()
        f.parent.close()
        a.parent.close()


def test_groups():
    with tmpfile('.hdf5') as fn:
        df.to_hdf(fn, '/data/fixed')

        hdf = resource('hdfstore://%s' % fn)
        assert discover(hdf) == discover({'data': {'fixed': df}})

        s = symbol('s', discover(hdf))
        assert list(compute(s.data.fixed, hdf).a) == [1, 2, 3, 4]
        hdf.close()
bsd-3-clause
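The assertions in test_hdfstore above hinge on pandas' two HDF5 layouts ('fixed' vs. 'table'). A minimal standalone sketch of that distinction, assuming pandas with the optional PyTables dependency and an illustrative file name:

import pandas as pd

frame = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [10., 20., 30., 40.]})

with pd.HDFStore('example.h5') as store:            # 'example.h5' is an illustrative path
    store.put('fixed', frame)                       # default write-once layout
    store.put('appendable', frame, format='table')  # appendable, queryable layout
    store.append('appendable', frame)               # only 'table' nodes support appends
    print(store.get_storer('fixed'))                # a Fixed storer
    print(store.get_storer('appendable'))           # an AppendableFrameTable storer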
sql-machine-learning/sqlflow
python/runtime/xgboost/evaluate.py
1
5630
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import sklearn.metrics
import xgboost as xgb
from runtime import db
from runtime.dbapi.paiio import PaiIOConnection
from runtime.feature.field_desc import DataType
from runtime.model.metadata import load_metadata
from runtime.xgboost.dataset import DMATRIX_FILE_SEP, xgb_dataset

SKLEARN_METRICS = [
    'accuracy_score',
    'average_precision_score',
    'balanced_accuracy_score',
    'brier_score_loss',
    'cohen_kappa_score',
    'explained_variance_score',
    'f1_score',
    'fbeta_score',
    'hamming_loss',
    'hinge_loss',
    'log_loss',
    'mean_absolute_error',
    'mean_squared_error',
    'mean_squared_log_error',
    'median_absolute_error',
    'precision_score',
    'r2_score',
    'recall_score',
    'roc_auc_score',
    'zero_one_loss',
]

DEFAULT_PREDICT_BATCH_SIZE = 10000


def evaluate(datasource,
             select,
             feature_metas,
             feature_column_names,
             label_meta,
             result_table,
             validation_metrics=["accuracy_score"],
             is_pai=False,
             pai_table="",
             model_params=None,
             transform_fn=None,
             feature_column_code=""):
    if not is_pai:
        conn = db.connect_with_data_source(datasource)
    else:
        conn = PaiIOConnection.from_table(pai_table)
    dpred = xgb_dataset(datasource,
                        'predict.txt',
                        select,
                        feature_metas,
                        feature_column_names,
                        label_meta,
                        is_pai,
                        pai_table,
                        True,
                        True,
                        batch_size=DEFAULT_PREDICT_BATCH_SIZE,
                        transform_fn=transform_fn,
                        feature_column_code=feature_column_code
                        )  # NOTE: default to use external memory
    bst = xgb.Booster({'nthread': 4})  # init model
    bst.load_model("my_model")  # load model
    if not model_params:
        model_params = load_metadata("model_meta.json")["attributes"]
    print("Start evaluating XGBoost model...")
    feature_file_id = 0
    for pred_dmatrix in dpred:
        evaluate_and_store_result(bst, pred_dmatrix, feature_file_id,
                                  validation_metrics, model_params,
                                  feature_column_names, label_meta, is_pai,
                                  conn, result_table)
        feature_file_id += 1
    print("Done evaluating. Result table : %s" % result_table)


def evaluate_and_store_result(bst, dpred, feature_file_id, validation_metrics,
                              model_params, feature_column_names, label_meta,
                              is_pai, conn, result_table):
    preds = bst.predict(dpred)
    if model_params:
        obj = model_params["objective"]
        # binary:logistic outputs probabilities; threshold at 0.5 to get
        # class labels (binary:hinge already outputs class labels)
        if obj.startswith("binary:logistic"):
            preds = (preds > 0.5).astype(int)
        # multi:softprob outputs per-class probabilities; take the argmax to
        # get class labels (multi:softmax already outputs class labels)
        elif obj.startswith("multi:softprob"):
            preds = np.argmax(np.array(preds), axis=1)
        # TODO(typhoonzero): deal with binary:logitraw when needed.
    else:
        # prediction output with multi-class job has two dimensions, this
        # is a temporary way, can remove this else branch when we can load
        # the model meta not only on PAI submitter.
        if len(preds.shape) == 2:
            preds = np.argmax(np.array(preds), axis=1)

    if is_pai:
        feature_file_read = open("predict.txt", "r")
    else:
        feature_file_read = open("predict.txt_%d" % feature_file_id, "r")

    y_test_list = []
    for line in feature_file_read:
        row = [i for i in line.strip().split(DMATRIX_FILE_SEP)]
        # DMatrix stores the label in the first column
        if label_meta["dtype"] == "float32" or \
                label_meta["dtype"] == DataType.FLOAT32:
            label = float(row[0])
        elif label_meta["dtype"] == "int64" or label_meta["dtype"] == "int32" \
                or label_meta["dtype"] == DataType.INT64:
            label = int(row[0])
        else:
            raise ValueError("unsupported label dtype: %s" %
                             label_meta["dtype"])
        y_test_list.append(label)
    y_test = np.array(y_test_list)

    evaluate_results = dict()
    for metric_name in validation_metrics:
        if metric_name not in SKLEARN_METRICS:
            raise ValueError("unsupported metric: %s" % metric_name)
        metric_func = getattr(sklearn.metrics, metric_name)
        metric_value = metric_func(y_test, preds)
        evaluate_results[metric_name] = metric_value

    # write evaluation result to result table
    result_columns = ["loss"] + validation_metrics
    with db.buffered_db_writer(conn, result_table, result_columns, 100) as w:
        row = ["0.0"]
        for mn in validation_metrics:
            row.append(str(evaluate_results[mn]))
        w.write(row)
apache-2.0
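The metric loop in evaluate_and_store_result above resolves metric names dynamically via getattr on sklearn.metrics. A self-contained sketch of that lookup, using made-up toy labels purely for illustration:

import numpy as np
import sklearn.metrics

y_true = np.array([0, 1, 1, 0, 1])   # hypothetical ground-truth labels
y_pred = np.array([0, 1, 0, 0, 1])   # hypothetical model predictions

for metric_name in ["accuracy_score", "recall_score"]:
    metric_func = getattr(sklearn.metrics, metric_name)  # name -> callable
    print(metric_name, metric_func(y_true, y_pred))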
Xenophyte/Long-Term-Memory-AI
functions.py
1
24584
import pylab import numpy as np from numpy.random import choice, randint from random import random, randint import random as rd from copy import deepcopy import networkx as nx from networkx.drawing.nx_agraph import graphviz_layout import matplotlib.pyplot as plt import math from time import time from multiprocessing import Pool import operator #library of elements; there are three types: value, function (+,-,*,/) or graph (which will be added at the end of the file) lib = [{"type":"value","symbol":"v"}, {"type":"function","function":lambda a:a[0]+a[1],"symbol":"+"}, {"type":"function","function":lambda a:a[0]-a[1],"symbol":"-"}, {"type":"function","function":lambda a:a[0]*a[1],"symbol":"*"}, {"type":"function","function":lambda a:a[0]/a[1],"symbol":"/"}] class Tree: def __init__(self,graph = None,values=None,variable_position=0): self.fitness = 1 if graph == None: self.graph = [] self.values = [] self.variable_position = 0 else: self.graph = graph if values == None: n_args = self.number_of_arguments() self.values = [random() for n in range(n_args)] else: self.values = values self.variable_position = variable_position #print("new graph created") #print(self.values) def clone(self): return Tree(self.graph) #function that takes in a graph and a list of input numbers and evaluates its result def evaluate(self,values,index=-1): #this case corresponds to if index == -1: index = self.find_output_node() node = self.graph[index] lib_entry = lib[node["lib_id"]] node_type = lib_entry["type"] if node_type == "value": return values[node["arg_index"]] else: args = [] for argument in node["input"]: arg = self.evaluate(values,argument) args.append(arg) if node_type == "function": return lib_entry["function"](args) elif node_type == "graph": return eval(lib_entry["graph"](args),args) #returns the position of the output node within the graph def find_output_node(self): referenced = [] for node in self.graph: referenced += node["input"] for n, node in enumerate(self.graph): if n not in referenced: return n #returns the number of arguments of a graph def number_of_arguments(self): n_arg = 0 for n, node in enumerate(self.graph): lib_entry = lib[node["lib_id"]] node_type = lib_entry["type"] if node_type == "value": n_arg += 1 return n_arg #returns a list with elements of the type [position of the argument in the graph,argument index in the argument vector] def input_info(self): args = [] for n, node in enumerate(self.graph): lib_entry = lib[node["lib_id"]] node_type = lib_entry["type"] if node_type == "value": args.append([n,node["arg_index"]]) n_args = len(args) arguments = [0 for i in range(n_args)] for arg in args: arguments[arg[1]] = arg[0] return arguments def merge_input_entries(self,i=-1,j=-1): n_args = self.number_of_arguments() if n_args <= 1: return Tree(self.graph) arg_vector = [k for k in range(0,n_args)] # if i is out of range pick a random value within range i = i if i in arg_vector else randint(0,n_args-1) # if j is out of range pick a random value within range which is different from i arg_vector.pop(i) j = j if j in arg_vector else choice(arg_vector) #index to keep min_index = min(sorted([i,j])) #index to replace with min_index max_index = max(sorted([i,j])) input_indexes = self.input_info() new_graph = deepcopy(self.graph) #remove node corresponding to max_index in the input vector kept_node_index = input_indexes[min_index] scrapped_node_index = input_indexes[max_index] del new_graph[scrapped_node_index] #update all references within graph: #create a mapping for the updated positions in the 
input vector input_mapping = [index if index < max_index else (min_index if index == max_index else index-1) for index in range(n_args)] #create a mapping for the updated positions in the graph if kept_node_index < scrapped_node_index: graph_mapping = [index if index < scrapped_node_index else (kept_node_index if index == scrapped_node_index else index-1) for index in range(len(self.graph))] elif kept_node_index > scrapped_node_index: graph_mapping = [index if index < scrapped_node_index else (kept_node_index - 1 if index == scrapped_node_index else index-1) for index in range(len(self.graph))] #update the references for node in new_graph: node["input"] = [ graph_mapping[n] for n in node["input"] ] for n in node["input"]: n = graph_mapping[n] #if it's an input node, update arg_index if len(node["input"]) == 0: node["arg_index"] = input_mapping[node["arg_index"]] return Tree(new_graph) #this function draws the graph def draw(self): G = nx.MultiDiGraph() for n, node in enumerate(self.graph): G.add_node(n,attr_dict=node) for n1, node in enumerate(self.graph): for i, n2 in enumerate(node["input"]): G.add_edge(n2, n1, attr_dict={"arg":i}) labels = {} for n, node in enumerate(G.nodes(data=True)): symbol = lib[node[1]["lib_id"]]["symbol"] labels[n] = symbol if symbol != "v" else node[1]["arg_index"] plt.close() nx.draw(G,labels = labels,with_labels = True,pos=nx.spring_layout(G)) plt.show() #plot a 1D graph of the function given some parameters def plot_specimen(self,values=None,x_range=[-10,10],variable_position=-1): #pylab.close() x = np.linspace(x_range[0],x_range[1],100) n_args = self.number_of_arguments() if values == None: values = self.values x_pos = variable_position if variable_position in range(n_args) else self.variable_position #input_size*number_of_data_points matrix that contains a list of input vectors like [parameter1, parameter2, x_value, ...] value_matrix = [[(xe if v == x_pos else value) for v, value in enumerate(values)] for xe in x] y = np.array([ self.evaluate(value_matrix[i]) for i in range(len(value_matrix)) ]) pylab.plot(x,y) pylab.show() #plot a 1D graph of the function given some parameters def plot_with_data(self,data,x_range=[-10,10]): #pylab.close() x = np.linspace(x_range[0],x_range[1],100) n_args = self.number_of_arguments() x_pos = self.variable_position values = self.values #input_size*number_of_data_points matrix that contains a list of input vectors like [parameter1, parameter2, x_value, ...] 
value_matrix = [[(xe if v == x_pos else value) for v, value in enumerate(values)] for xe in x] y = np.array([ self.evaluate(value_matrix[i]) for i in range(len(value_matrix)) ]) pylab.plot(x,y) x = data["x"] y = data["y"] pylab.scatter(x,y) pylab.show() #given a set of parameters and a graph, compute the error relative to a dataset x, y def error(self,data,parameters=None,variable_position=None): if parameters == None: parameters = self.values if variable_position == None: variable_position = self.variable_position #print("computing error...") #print("current parameters: {0}".format(self.values)) X = data["x"] Y = data["y"] e = 0 arg = deepcopy(parameters) for x,y in zip(X,Y): arg[variable_position] = x e += (y-self.evaluate(arg))**2 return e if e == e else 10**10 def optimize_once(self,data): EPSILON = 0.001 n_args = self.number_of_arguments() #print("number of arguments: {0}".format(n_args)) best_error = self.error(data,variable_position=0) best_parameters = {"X":0,"values":self.values,"error":best_error} for variable_position in range(n_args): grad = gradient(lambda a: self.error(data,parameters=a,variable_position=variable_position),self.values) current_error = self.error(data,variable_position=variable_position) new_values = self.values - EPSILON*grad new_error = self.error(data,parameters = new_values,variable_position = variable_position) if new_error < best_error: best_parameters = {"X":variable_position,"values":new_values,"error":new_error} best_error = new_error self.values = best_parameters["values"] self.variable_position = best_parameters["X"] return best_parameters #optimizes a given graph on a set of data points x and y def optimize(self,data,variable_position=-1,timeout=False): start = time() n_args = self.number_of_arguments() #select a random position in the input vector for the x value if variable_position == -1: variable_position = randint(0,n_args-1) #seed input values = np.random.rand(n_args) #do this for a thousand steps EPSILON = 1 TOLERANCE = 0.01 iterations = 0 MAX_ITERATIONS = 50 while True: grad = gradient(lambda a: self.error(a,variable_position,data),values) current_error = self.error(values,variable_position,data) #if the accuracy goal has been reached, stop and return the input vector if current_error < TOLERANCE: break while True: iterations += 1 enough = (time() - start > 10) or (iterations > MAX_ITERATIONS) if timeout else iterations > MAX_ITERATIONS #if iterations > MAX_ITERATIONS: #if (time() - start > 10) or (iterations > MAX_ITERATIONS): if enough: return {"X": variable_position, "parameters": values, "iterations": iterations, "error": current_error} next_error = self.error(values - EPSILON*grad,variable_position,data) #if the change increases the error, reduce the size of the step if next_error > current_error: EPSILON *= 0.5 #else just move on else: break #print("epsilon: {0}".format(EPSILON)) values += -EPSILON*grad return {"X": variable_position, "parameters": values, "iterations": iterations, "error": current_error} def full_optimize(self,data): min_error = 10**10 n_args = self.number_of_arguments() for n in range(n_args): optimization = self.optimize(data,variable_position=n) if optimization["error"] < min_error: best = optimization return optimization #plot the optimal fit os a graph onto a set of data points def plot_optimized(self,data): pylab.plot(data["x"],data["y"]) error = 10**10 for i in range(self.number_of_arguments()): result = self.optimize(data,i) if result["error"] < error: error = result["error"] values = result["parameters"] 
x_pos = result["X"] self.plot_specimen(values,[min(data["x"]),max(data["x"])],x_pos) #fitness of a given graph def unfitness(self,data): start = time() result = self.full_optimize(data) end = time() return result["error"] + 1*(end-start) ################################################################################################################################ #g = {"type":"graph","graph":Tree([{"lib_id":0,"input":[],"arg_index":0}])} #lib.append(g) #Add a graph to the lib for each operator #for n in range(1,5): # g = {"type":"graph","graph":Tree([{"lib_id":0,"input":[],"arg_index":0}, # {"lib_id":0,"input":[],"arg_index":1}, # {"lib_id":n,"input":[0,1]}])} # lib.append(g) ################################################################################################################################ class Population: def __init__(self,maxpop = 100): self.BIRTH_PROBABILITY = 0.02 self.MUTATION_PROBABILITY = 0.02 self.DEATH_PROBABILITY = 0.02 self.MAXIMUM_POPULATION_SIZE = maxpop self.population = [] for n, item in enumerate(lib): if item["type"] == "value": tree = Tree(graph=[{"lib_id":0,"input":[],"arg_index":0}],values=[random()]) self.population.append(tree) elif item["type"] == "function": input_vector = [random() for i in range(2)] tree = Tree(graph=[{"lib_id":0,"input":[],"arg_index":0}, {"lib_id":0,"input":[],"arg_index":1}, {"lib_id":n,"input":[0,1]}],values=input_vector) self.population.append(tree) def evolve(self,data,generations=100): for n in range(generations): print("{0}-th year, population size = {1}".format(n,len(self.population))) self.evolution_step(data) return self.best_specimen(data) def birth(self,sorted_population=None): if sorted_population == None: recipient = rd.choice(self.population) donor = rd.choice(self.population) else: recipient = rd.choice(sorted_population[int(0.8*len(sorted_population)):]) recipient = self.population[recipient[0]] donor = rd.choice(sorted_population[:int(0.8*len(sorted_population))]) donor = self.population[donor[0]] baby = insert_at(donor,recipient) return baby def mutate(self,sorted_population=None): if sorted_population == None: mutant = rd.choice(self.population).merge_input_entries() else: mutant = rd.choice(sorted_population[int(0.8*len(sorted_population)):]) mutant = self.population[mutant[0]] return mutant def kill(self,sorted_population=None): if len(self.population) < 10: return None if sorted_population == None: mark = randint(0,len(self.population)) else: mark = rd.choice(sorted_population[:int(0.2*len(sorted_population))]) mark = mark[0] return self.population.pop(mark) if 4 < mark < len(self.population) else None def evolution_step(self,data,n_steps=1): for specimen in self.population: #print("optimizing... 
") #specimen.draw() result = specimen.optimize_once(data) #print("result: {0}".format(result)) sorted_population = self.sort_population(data) birth_coin = random() if birth_coin <= self.BIRTH_PROBABILITY: #print("birth:") baby = self.birth(sorted_population=sorted_population) #baby.draw() self.population.append(baby) mutation_coin = random() if mutation_coin <= self.MUTATION_PROBABILITY: #print("mutation:") mutant = self.mutate(sorted_population=sorted_population) #mutant.draw() self.population.append(mutant) death_coin = random() if death_coin <= self.DEATH_PROBABILITY: self.kill(sorted_population=sorted_population) def best_specimen(self,data): best_error = 10**10 best = 0 for specimen in self.population: error = specimen.error(data) if error <= best_error: best = specimen best_error = error return best def worst_specimen(self,data): worst_error = 0 worst = 10**10 for specimen in self.population: error = specimen.error(data) if error >= worst_error: worst = specimen worst_error = error return worst def sort_population(self,data): errors = {} for n, specimen in enumerate(self.population): error = specimen.error(data) errors[n] = error sorted_population = sorted(errors.items(), key=operator.itemgetter(1)) def plot_population(self): return False ################################################################################################################################# #function that combines two graphs by using the output node of the second graph as one of the inputs of the first def insert_at(donor_graph,recipient_graph,site=-1): recipient_args = recipient_graph.input_info() donor_args = donor_graph.input_info() if site == -1: site = randint(0,len(recipient_args)-1) site_index = recipient_args[site] site_arg_index = recipient_graph.graph[site_index]["arg_index"] donor_output = donor_graph.find_output_node() new_graph = [deepcopy(node) for node in recipient_graph.graph] new_graph.pop(site_index) for n, node in enumerate(new_graph): lib_entry = lib[node["lib_id"]] node_type = lib_entry["type"] if node_type == "function" or node_type == "graph": node_input = [ni for ni in node["input"]] for s, subnode in enumerate(node_input): if subnode > site_index: node_input[s] = subnode - 1 elif subnode == site_index: node_input[s] = donor_output + len(new_graph) node["input"] = node_input elif node_type == "value": node["arg_index"] -= (1 if node["arg_index"] > site_arg_index else 0) processed_donor = [deepcopy(node) for node in donor_graph.graph] for n, node in enumerate(processed_donor): lib_entry = lib[node["lib_id"]] node_type = lib_entry["type"] if node_type == "function" or node_type == "graph": node_input = [ni for ni in node["input"]] node["input"] = [ni + len(new_graph) for ni in node_input] elif node_type == "value": node["arg_index"] += len(recipient_args) - 1 return Tree(new_graph+processed_donor) def print2Dmatrix(matrix): print('\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in matrix])) #makes updates the parameters of a graph def gradient(function,values): EPSILON = 0.000000001 value_here = np.array([ function(values) for i in range(len(values)) ]) dx = np.array([[ EPSILON if i == j else 0 for i in range(len(values)) ] for j in range(len(values)) ]) value_there = np.array([ function(values+dx[i]) for i in range(len(values)) ]) grad = (value_there-value_here)/EPSILON return grad #takes the items from the library and mixes them to make new graphs until it finds one that fits the data def evolution(data): ACCURACY_GOAL = 0.01 POPULATION_SIZE = 300 MUTATION_RATE = 0.1 
BIRTH_RATE = 0.1 CLONING_RATE = 0.05 DEATH_RATE = MUTATION_RATE + BIRTH_RATE + CLONING_RATE population = [] #create the initial population for i in range(POPULATION_SIZE): graph = choice(lib[5:])["graph"].clone() population.append(graph) #compute weights errors = np.array([ specimen.unfitness(data) for specimen in population ]) #stop if minimum error is below the accuracy goal min_error = np.ndarray.min(errors) #save the best best_error_so_far = 10**10 winner = np.argmin(errors) population[winner].draw() population[winner].plot_optimized(data) if min_error < best_error_so_far: best_error_so_far = min_error fittest_specimen = population[winner] if min_error < ACCURACY_GOAL: winner = np.argmin(errors) return {"specimen": population[winner], "error": errors[winner]} weights = errors/sum(errors) #weights = np.array([ 1/POPULATION_SIZE for i in range(POPULATION_SIZE) ]) #evolve for generation in range(20): print('evolving generation {0} of 20'.format(generation)) #compute weights new_population = deepcopy(population) temp_weights = deepcopy(weights) #DEATH ROUND #kill int(POPULATION_SIZE*DEATH_RATE) graphs dead = 0 n_dead = int(POPULATION_SIZE*DEATH_RATE) for kill in range(n_dead): to_die = choice(len(new_population),p=temp_weights) temp_weights = np.delete(temp_weights,to_die) temp_weights = temp_weights/np.sum(temp_weights) new_population.pop(to_die) dead += 1 #print("killed :{0}".format(dead)) #CLONING ROUND #copy int(POPULATION_SIZE*CLONING_RATE) new graphs clones = 0 n_clones = int(POPULATION_SIZE*CLONING_RATE) for i in range(n_clones): clone_index = choice(len(population)) clone = population[clone_index].clone() new_population.append(clone) clones+=1 #print("clones :{0}".format(clones)) #MUTATION ROUND #add int(POPULATION_SIZE*MUTATION_RATE) new graphs mutants = 0 n_mutants = int(POPULATION_SIZE*MUTATION_RATE) for i in range(n_mutants): mutant_index = choice(len(population)) mutant = population[mutant_index].clone() mutant = mutant.merge_input_entries() new_population.append(mutant) mutants+=1 #print("mutants :{0}".format(mutants)) #REPRODUCTION ROUND #add int(POPULATION_SIZE*BIRTH_RATE) new graphs babies = 0 n_babies = n_dead - (n_clones+n_mutants) for i in range(n_babies): mother_index = choice(len(population))#,p=(1-weights)/sum(1-weights))#[0]["graph"] mother = population[mother_index] father_index = choice(len(population),p=(1-weights)/sum(1-weights))#[0]["graph"] father = population[father_index] child = insert_at(father,mother) new_population.append(child) babies+=1 #print("babies :{0}".format(babies)) population = deepcopy(new_population) #recompute the errors for the newcomers errors = deepcopy(errors) first_new_index = POPULATION_SIZE-int(POPULATION_SIZE*DEATH_RATE) for s, specimen in enumerate(population[first_new_index:]): err = specimen.unfitness(data) errors[s+first_new_index] = err#specimen.unfitness(data) if err > 10**7: return specimen #stop if minimum error is below the accuracy goal min_error = np.ndarray.min(errors) #save the best winner = np.argmin(errors) #population[winner].draw() population[winner].plot_optimized(data) if min_error < best_error_so_far: best_error_so_far = min_error fittest_specimen = population[winner] if min_error < ACCURACY_GOAL: winner = np.argmin(errors) return {"specimen": population[winner], "error": errors[winner]} weights = errors/sum(errors) #print("population size : {0}".format(len(population))) print("min error at generation n."+str(generation)+": "+str(np.ndarray.min(errors))) #recompute final errors errors = np.array([ 
specimen.unfitness(data) for specimen in population ]) winner = np.argmin(errors) if errors[winner] < best_error_so_far: best_error_so_far = errors[winner] fittest_specimen = population[winner] return {"specimen": fittest_specimen, "error": best_error_so_far} ################################################################################################################################
unlicense
jaeilepp/mne-python
tutorials/plot_object_raw.py
1
5456
# -*- coding: utf-8 -*-
"""
.. _tut_raw_objects:

The :class:`Raw <mne.io.Raw>` data structure: continuous data
==============================================================
"""

from __future__ import print_function

import mne
import os.path as op
from matplotlib import pyplot as plt

###############################################################################
# Continuous data is stored in objects of type :class:`Raw <mne.io.Raw>`.
# The core data structure is simply a 2D numpy array (channels × samples,
# stored in a private attribute called `._data`) combined with an
# :class:`Info <mne.Info>` object (`.info` attribute)
# (see :ref:`tut_info_objects`).
#
# The most common way to load continuous data is from a .fif file. For more
# information on :ref:`loading data from other formats <ch_convert>`, or
# creating it :ref:`from scratch <tut_creating_data_structures>`.

###############################################################################
# Loading continuous data
# -----------------------

# Load an example dataset, the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample',
                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(data_path, preload=True)
raw.set_eeg_reference('average', projection=True)  # set EEG average reference

# Give the sample rate
print('sample rate:', raw.info['sfreq'], 'Hz')
# Give the size of the data matrix
print('channels x samples:', raw._data.shape)

###############################################################################
# .. note:: Accessing the `._data` attribute is done here for educational
#           purposes. However this is a private attribute as its name starts
#           with an `_`. This suggests that you should **not** access this
#           variable directly but rely on indexing syntax detailed just below.

###############################################################################
# Information about the channels contained in the :class:`Raw <mne.io.Raw>`
# object is contained in the :class:`Info <mne.Info>` attribute.
# This is essentially a dictionary with a number of relevant fields (see
# :ref:`tut_info_objects`).

###############################################################################
# Indexing data
# -------------
#
# To access the data stored within :class:`Raw <mne.io.Raw>` objects,
# it is possible to index the :class:`Raw <mne.io.Raw>` object.
#
# Indexing a :class:`Raw <mne.io.Raw>` object will return two arrays: an array
# of times, as well as the data representing those timepoints. This works
# even if the data is not preloaded, in which case the data will be read from
# disk when indexing. The syntax is as follows:

# Extract data from the first 5 channels, from 1 s to 3 s.
sfreq = raw.info['sfreq']
data, times = raw[:5, int(sfreq * 1):int(sfreq * 3)]

_ = plt.plot(times, data.T)
_ = plt.title('Sample channels')

###############################################################################
# -----------------------------------------
# Selecting subsets of channels and samples
# -----------------------------------------
#
# It is possible to use more intelligent indexing to extract data, using
# channel names, types or time ranges.

# Pull all MEG gradiometer channels:
# Make sure to use .copy() or it will overwrite the data
meg_only = raw.copy().pick_types(meg=True)
eeg_only = raw.copy().pick_types(meg=False, eeg=True)

# The MEG flag in particular lets you specify a string for more specificity
grad_only = raw.copy().pick_types(meg='grad')

# Or you can use custom channel names
pick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123']
specific_chans = raw.copy().pick_channels(pick_chans)
print(meg_only, eeg_only, grad_only, specific_chans, sep='\n')

###############################################################################
# Notice the different scalings of these types

f, (a1, a2) = plt.subplots(2, 1)
eeg, times = eeg_only[0, :int(sfreq * 2)]
meg, times = meg_only[0, :int(sfreq * 2)]
a1.plot(times, meg[0])
a2.plot(times, eeg[0])
del eeg, meg, meg_only, grad_only, eeg_only, data, specific_chans

###############################################################################
# You can restrict the data to a specific time range
raw = raw.crop(0, 50)  # in seconds
print('New time range from', raw.times.min(), 's to', raw.times.max(), 's')

###############################################################################
# And drop channels by name
nchan = raw.info['nchan']
raw = raw.drop_channels(['MEG 0241', 'EEG 001'])
print('Number of channels reduced from', nchan, 'to', raw.info['nchan'])

###############################################################################
# --------------------------------------------------
# Concatenating :class:`Raw <mne.io.Raw>` objects
# --------------------------------------------------
#
# :class:`Raw <mne.io.Raw>` objects can be concatenated in time by using the
# :func:`append <mne.io.Raw.append>` function. For this to work, they must
# have the same number of channels and their :class:`Info <mne.Info>`
# structures should be compatible.

# Create multiple :class:`Raw <mne.io.RawFIF>` objects
raw1 = raw.copy().crop(0, 10)
raw2 = raw.copy().crop(10, 20)
raw3 = raw.copy().crop(20, 40)

# Concatenate in time (also works without preloading)
raw1.append([raw2, raw3])
print('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')
bsd-3-clause
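The tutorial above loads a Raw object from a sample .fif file; the same (data, times) indexing also works on a Raw built in memory. A small sketch assuming only mne and numpy are installed (channel names and sizes are arbitrary choices for illustration):

import numpy as np
import mne

data = np.random.randn(3, 1000)            # (n_channels, n_samples), like Raw._data
info = mne.create_info(ch_names=['ch1', 'ch2', 'ch3'], sfreq=100.,
                       ch_types=['eeg'] * 3)
raw_sim = mne.io.RawArray(data, info)

chunk, times = raw_sim[:2, 0:500]          # indexing returns (data, times)
print(chunk.shape, times.shape)            # (2, 500) (500,)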
pvougiou/KB-Text-Alignment
src/Dataset-WikiAstronauts.py
1
17117
""" Copyright 2016 Pavlos Vougiouklis Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import xml.etree.ElementTree as ET import csv import re from nltk.tokenize import RegexpTokenizer import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import axes3d import os import glob import shutil import cPickle as pickle xml_dir = 'Data/WikiAstronauts/XML/' csv_dir = 'Data/wikiAstronauts/CSV/' exp_dir = 'CrowdFlower/WikiAstronauts/' cache_dir = 'Caches/WikiAstronauts/' num_files = 0 processed_files = 0 num_sentences = 0 included_sentences = [0, 0, 0] facts = 0 processed_facts = 0 tokens_triples = [] num_triples = [] num_tokens = [] unique_predicates = [] predicates = {} num_annotations = 0 dictionary = [] # The structure of the data -- list of dictionaries. #data = [{'value': .., 'annotated_sentence': .., 'triples': [..], 'simplification': ..}] def annotate(text): while text.find('[[') > -1: start_flag = text.find('[[') or_flag = text.find('|') end_flag = text.find(']]') text = text.replace('[[', '<b><font color="purple">', 1) text = text.replace(text[or_flag + len('<b><font color="purple">') - 2 :end_flag + len('<b><font color="purple">') - 2 + 2], '</font></b>', 1) return text def get_annotations(filename, sentence): total = 0 add_annotations(filename, sentence) while sentence.find('[[') > -1: sentence = sentence.replace('[[', '', 1) total = total + 1 return total def get_predicate(triple): start_flag = triple.find(' ') tmp = triple.replace(' ', '', 1) end_flag = tmp.find(' ') return triple[start_flag + 1:end_flag + 1] def construct_graph_token_triples(data): plt.rc('text', usetex=True) plt.rc('font', family='serif') x = np.arange(min(data), max(data) + 5, 5) total = {item: 0 for item in x} for item in data: for i in range(0, len(x)): if item <= x[i]: total[x[i]] = total[x[i]] + 1 break y = [] for item in x: y.append(total[item]) print(x) print(y) plt.plot(x, y, '-o', color='b', linewidth=2.0) plt.xlabel(r'$^{\textrm{Number of Tokens}}/_{\textrm{Number of Triples}}$') plt.ylabel("Number of Sentences") plt.grid() plt.show() def construct_graph_triples(data): plt.rc('text', usetex=True) plt.rc('font', family='serif') x = np.arange(min(data), max(data) + 1, 1) total = {item: 0 for item in x} for item in data: for i in range(0, len(x)): if item <= x[i]: total[x[i]] = total[x[i]] + 1 break y = [] for item in x: y.append(total[item]) print(x) print(y) plt.plot(x, y, '-o', color='r', linewidth=2.0) plt.xlabel("Number of Triples") plt.ylabel("Number of Sentences") plt.grid() plt.show() def construct_graph_tokens(data): plt.rc('text', usetex=True) plt.rc('font', family='serif') x = np.arange(min(data), max(data) + 1, 1) total = {item: 0 for item in x} for item in data: for i in range(0, len(x)): if item <= x[i]: total[x[i]] = total[x[i]] + 1 break y = [] for item in x: y.append(total[item]) print(x) print(y) plt.plot(x, y, '-o', color='c', linewidth=2.0) plt.xlabel("Number of Tokens") plt.ylabel("Number of Sentences") plt.grid() plt.show() def construct_graph_tokens_to_triples(tokens, triples): plt.rc('text', 
usetex=True) plt.rc('font', family='serif') plt.plot(tokens, triples, 'o', color='r', linewidth=2.0) plt.xlabel("Number of Tokens") plt.ylabel("Number of Triples") plt.grid() plt.show() def dump_cache(data): if os.path.exists(cache_dir): shutil.rmtree(cache_dir) os.makedirs(cache_dir) pickle.dump(data, open(cache_dir + 'dataset-WikiAstronauts.p', "wb")) def add_annotations(text, sentence): search = ['He', 'She'] tokenizer = RegexpTokenizer(r'\w+') text = text.replace('_', ' ') text_tokens = tokenizer.tokenize(text) search.extend(text_tokens) flag = False for entity in search: pos = 0 while sentence[pos:].find(entity) > -1: pos = sentence[pos:].find(entity) + pos if sentence[pos - 1] != '[' and sentence[pos + len(entity)] == ' ' and (sentence[pos - 1] == ' ' or sentence[pos - 1] == '\t') and pos >= 1 or \ sentence[pos + len(entity)] != '|' and sentence[pos + len(entity)] == ' ' and pos == 0: sentence = sentence[:pos] + '[[' + entity + '|' + entity + ']]' + sentence[pos+len(entity):] pos = pos + len('[[' + entity + '|' + entity + ']]') flag = True else: pos = pos + len(entity) return sentence def dataset(): global tokens_triples global num_sentences global included_sentences global processed_facts global num_triples global num_tokens global num_annotations global unique_predicates global predicates global dictionary if os.path.exists(exp_dir + 'experiment.csv'): os.remove(exp_dir + 'experiment.csv') tokenizer = RegexpTokenizer(r'\w+') with open(exp_dir + 'experiment.csv', 'wb') as exp_file: fieldnames = ['Sentence'] writer = csv.DictWriter(exp_file, fieldnames = fieldnames) writer.writeheader() for filename in glob.glob(os.path.join(csv_dir, '*.csv')): #print filename with open(filename, 'rb') as csv_file: csv_reader = csv.reader(csv_file) csv_reader.next() rows_flag = 0; with open(filename.replace('CSV', 'XML').replace('csv', 'xml'), 'r') as xml_file: sentence_number = -1 xml = xml_file.read() xml = unicode(xml, 'ascii', errors='ignore') root = ET.fromstring(xml) for row in csv_reader: sentence_number = sentence_number + 1 num_sentences = num_sentences + 1 """ if len(root[sentence_number][5]) >= 1 and row[0].find('?') == -1: rows_flag = rows_flag + 1 dictionary.append({'annotated_sentence': root[sentence_number][2].text, 'value': root[sentence_number][0].text, 'triples': [], 'simplification': ''}) for triple in range(0, len(root[sentence_number][5])): dictionary[sum(included_sentences)]['triples'].append(root[sentence_number][5][triple].text) included_sentences[0] = included_sentences[0] + 1 writer.writerow({'Sentence': row[0]}) num_triples.append(len(root[sentence_number][5])) num_tokens.append(len(tokenizer.tokenize(root[sentence_number][0].text))) tokens_triples.append(len(tokenizer.tokenize(root[sentence_number][0].text)) / len(root[sentence_number][5])) processed_facts = processed_facts + len(root[sentence_number][5]) """ if len(root[sentence_number][5]) >= 1: if row[0].find('?') == -1 and (len(tokenizer.tokenize(root[sentence_number][0].text)) / len(root[sentence_number][5]) >= 20) and included_sentences[2] < 200: rows_flag = rows_flag + 1 dictionary.append({'annotated_sentence': root[sentence_number][2].text, 'value': root[sentence_number][0].text, 'triples': [], 'simplification': ''}) for triple in range(0, len(root[sentence_number][5])): dictionary[sum(included_sentences)]['triples'].append(root[sentence_number][5][triple].text) included_sentences[2] = included_sentences[2] + 1 writer.writerow({'Sentence': row[0]}) #print row[0] num_annotations = num_annotations + 
get_annotations(filename.rsplit('/',1)[-1], root[sentence_number][2].text) for triple in range(0, len(root[sentence_number][5])): predicate = get_predicate(root[sentence_number][5][triple].text) if predicate not in unique_predicates: unique_predicates.append(predicate) predicates[predicate] = 1 else: predicates[predicate] = predicates[predicate] + 1 num_triples.append(len(root[sentence_number][5])) num_tokens.append(len(tokenizer.tokenize(root[sentence_number][0].text))) tokens_triples.append(len(tokenizer.tokenize(root[sentence_number][0].text)) / len(root[sentence_number][5])) #print sentence_number #print xml_file processed_facts = processed_facts + len(root[sentence_number][5]) if row[0].find('?') == -1 and (len(tokenizer.tokenize(root[sentence_number][0].text)) / len(root[sentence_number][5]) < 20) \ and len(tokenizer.tokenize(root[sentence_number][0].text)) / len(root[sentence_number][5]) >= 10 and included_sentences[1] < 200: rows_flag = rows_flag + 1 dictionary.append({'annotated_sentence': root[sentence_number][2].text, 'value': root[sentence_number][0].text, 'triples': [], 'simplification': ''}) for triple in range(0, len(root[sentence_number][5])): dictionary[sum(included_sentences)]['triples'].append(root[sentence_number][5][triple].text) included_sentences[1] = included_sentences[1] + 1 writer.writerow({'Sentence': row[0]}) #print row[0] num_annotations = num_annotations + get_annotations(filename.rsplit('/',1)[-1], root[sentence_number][2].text) for triple in range(0, len(root[sentence_number][5])): predicate = get_predicate(root[sentence_number][5][triple].text) if predicate not in unique_predicates: unique_predicates.append(predicate) predicates[predicate] = 1 else: predicates[predicate] = predicates[predicate] + 1 num_triples.append(len(root[sentence_number][5])) num_tokens.append(len(tokenizer.tokenize(root[sentence_number][0].text))) tokens_triples.append(len(tokenizer.tokenize(root[sentence_number][0].text)) / len(root[sentence_number][5])) #print sentence_number #print xml_file processed_facts = processed_facts + len(root[sentence_number][5]) if row[0].find('?') == -1 and (len(tokenizer.tokenize(root[sentence_number][0].text)) / len(root[sentence_number][5]) < 10) \ and len(tokenizer.tokenize(root[sentence_number][0].text)) / len(root[sentence_number][5]) >= 5 and included_sentences[0] < 200: rows_flag = rows_flag + 1 dictionary.append({'annotated_sentence': root[sentence_number][2].text, 'value': root[sentence_number][0].text, 'triples': [], 'simplification': ''}) for triple in range(0, len(root[sentence_number][5])): dictionary[sum(included_sentences)]['triples'].append(root[sentence_number][5][triple].text) included_sentences[0] = included_sentences[0] + 1 writer.writerow({'Sentence': row[0]}) #print row[0] num_annotations = num_annotations + get_annotations(filename.rsplit('/',1)[-1], root[sentence_number][2].text) for triple in range(0, len(root[sentence_number][5])): predicate = get_predicate(root[sentence_number][5][triple].text) if predicate not in unique_predicates: unique_predicates.append(predicate) predicates[predicate] = 1 else: predicates[predicate] = predicates[predicate] + 1 num_triples.append(len(root[sentence_number][5])) num_tokens.append(len(tokenizer.tokenize(root[sentence_number][0].text))) tokens_triples.append(len(tokenizer.tokenize(root[sentence_number][0].text)) / len(root[sentence_number][5])) #print sentence_number #print xml_file processed_facts = processed_facts + len(root[sentence_number][5]) xml_file.close() csv_file.close() 
exp_file.close() # It sorts the dictionary of predicates according to the times of occurrence. #print sorted(predicates.items(), key=lambda x:x[1], reverse=True) print('%d out of the total %d sentences have been included.' % (sum(included_sentences), num_sentences)) print('Total number of facts-triples that have been included: %d' % (processed_facts)) print('Total number of tokens of the sentences that have been included: %d' % (sum(num_tokens))) print('Total number of arguments of the sentences that have been included: %d' % (num_annotations)) print('Total number of unique predicates of the sentences that have been included: %d' % (len(unique_predicates))) def main(): if os.path.exists(csv_dir): shutil.rmtree(csv_dir) os.makedirs(csv_dir) #parser = etree.XMLParser(encoding="utf-8") for filename in glob.glob(os.path.join(xml_dir, '*.xml')): global num_files num_files = num_files + 1 with open(filename, 'r') as xml_file: with open(csv_dir + filename.replace(xml_dir, '').replace('.xml', '.csv'), 'wb') as csv_file: fieldnames = ['Sentence'] writer = csv.DictWriter(csv_file, fieldnames = fieldnames) writer.writeheader() xml = xml_file.read() xml = unicode(xml, 'ascii', errors='ignore') try: root = ET.fromstring(xml) global processed_files global facts processed_files = processed_files + 1 for i in range(0, len(root)): writer.writerow({'Sentence': annotate(add_annotations(filename.rsplit('/',1)[-1], root[i][2].text))}) facts = facts + len(root[i][5]) csv_file.close() except ET.ParseError: csv_file.close() os.remove(csv_dir + filename.replace(xml_dir, '').replace('.xml', '.csv')) #print(filename + ' has not been included.') xml_file.close() print('%d / %d XML files have been processed.' % (processed_files, num_files)) print('Total number of facts-triples that have been processed: %d' % (facts)) main() dataset() dump_cache(dictionary) #construct_graph_token_triples(tokens_triples) #construct_graph_triples(num_triples) #construct_graph_tokens(num_tokens) #construct_graph_tokens_to_triples(num_tokens, num_triples)
apache-2.0
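The gradient helper in functions.py above estimates partial derivatives with forward differences before taking a descent step. For comparison, a self-contained sketch of the same idea (the function name, test function and 1e-6 step size are illustrative, not taken from the repository):

import numpy as np

def finite_difference_gradient(function, values, epsilon=1e-6):
    """Forward-difference estimate of d(function)/d(values[i]) for each i."""
    values = np.asarray(values, dtype=float)
    base = function(values)                 # evaluate once at the current point
    grad = np.zeros_like(values)
    for i in range(len(values)):
        step = np.zeros_like(values)
        step[i] = epsilon                   # perturb one coordinate at a time
        grad[i] = (function(values + step) - base) / epsilon
    return grad

print(finite_difference_gradient(lambda v: v[0] ** 2 + 3 * v[1], [2.0, 1.0]))
# approximately [4., 3.]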
AnasGhrab/scikit-learn
examples/datasets/plot_random_multilabel_dataset.py
278
3402
""" ============================================== Plot randomly generated multilabel dataset ============================================== This illustrates the `datasets.make_multilabel_classification` dataset generator. Each sample consists of counts of two features (up to 50 in total), which are differently distributed in each of two classes. Points are labeled as follows, where Y means the class is present: ===== ===== ===== ====== 1 2 3 Color ===== ===== ===== ====== Y N N Red N Y N Blue N N Y Yellow Y Y N Purple Y N Y Orange Y Y N Green Y Y Y Brown ===== ===== ===== ====== A star marks the expected sample for each class; its size reflects the probability of selecting that class label. The left and right examples highlight the ``n_labels`` parameter: more of the samples in the right plot have 2 or 3 labels. Note that this two-dimensional example is very degenerate: generally the number of features would be much greater than the "document length", while here we have much larger documents than vocabulary. Similarly, with ``n_classes > n_features``, it is much less likely that a feature distinguishes a particular class. """ from __future__ import print_function import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_multilabel_classification as make_ml_clf print(__doc__) COLORS = np.array(['!', '#FF3333', # red '#0198E1', # blue '#BF5FFF', # purple '#FCD116', # yellow '#FF7216', # orange '#4DBD33', # green '#87421F' # brown ]) # Use same random seed for multiple calls to make_multilabel_classification to # ensure same distributions RANDOM_SEED = np.random.randint(2 ** 10) def plot_2d(ax, n_labels=1, n_classes=3, length=50): X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2, n_classes=n_classes, n_labels=n_labels, length=length, allow_unlabeled=False, return_distributions=True, random_state=RANDOM_SEED) ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4] ).sum(axis=1)), marker='.') ax.scatter(p_w_c[0] * length, p_w_c[1] * length, marker='*', linewidth=.5, edgecolor='black', s=20 + 1500 * p_c ** 2, color=COLORS.take([1, 2, 4])) ax.set_xlabel('Feature 0 count') return p_c, p_w_c _, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4)) plt.subplots_adjust(bottom=.15) p_c, p_w_c = plot_2d(ax1, n_labels=1) ax1.set_title('n_labels=1, length=50') ax1.set_ylabel('Feature 1 count') plot_2d(ax2, n_labels=3) ax2.set_title('n_labels=3, length=50') ax2.set_xlim(left=0, auto=True) ax2.set_ylim(bottom=0, auto=True) plt.show() print('The data was generated from (random_state=%d):' % RANDOM_SEED) print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t') for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T): print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
bsd-3-clause
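The plot above is driven entirely by make_multilabel_classification with return_distributions=True. A short sketch of that call on its own, with a tiny sample count chosen only so the outputs are easy to print:

from sklearn.datasets import make_multilabel_classification

X, Y, p_c, p_w_c = make_multilabel_classification(
    n_samples=10, n_features=2, n_classes=3, n_labels=2, length=50,
    allow_unlabeled=False, return_distributions=True, random_state=0)

print(X.shape, Y.shape)  # (10, 2) feature counts, (10, 3) label indicators
print(p_c)               # class prior p(C) used to draw labels
print(p_w_c)             # per-class feature distributions p(w|C), shape (2, 3)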
huobaowangxi/scikit-learn
sklearn/datasets/mlcomp.py
289
3855
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""

import os
import numbers
from sklearn.datasets.base import load_files


def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
    if set_ is not None:
        dataset_path = os.path.join(dataset_path, set_)
    return load_files(dataset_path, metadata.get('description'), **kwargs)


LOADERS = {
    'DocumentClassification': _load_document_classification,
    # TODO: implement the remaining domain formats
}


def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
    """Load a datasets as downloaded from http://mlcomp.org

    Parameters
    ----------

    name_or_id : the integer id or the string name metadata of the MLComp
                 dataset to load

    set_ : select the portion to load: 'train', 'test' or 'raw'

    mlcomp_root : the filesystem path to the root folder where MLComp datasets
                  are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
                  environment variable is looked up instead.

    **kwargs : domain specific kwargs to be passed to the dataset loader.

    Read more in the :ref:`User Guide <datasets>`.

    Returns
    -------

    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn, 'target', the
        classification labels (integer index), 'target_names',
        the meaning of the labels, and 'DESCR', the full description of the
        dataset.

    Note on the lookup process: depending on the type of name_or_id,
    will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.

    TODO: implement zip dataset loading too
    """

    if mlcomp_root is None:
        try:
            mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
        except KeyError:
            raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")

    mlcomp_root = os.path.expanduser(mlcomp_root)
    mlcomp_root = os.path.abspath(mlcomp_root)
    mlcomp_root = os.path.normpath(mlcomp_root)

    if not os.path.exists(mlcomp_root):
        raise ValueError("Could not find folder: " + mlcomp_root)

    # dataset lookup
    if isinstance(name_or_id, numbers.Integral):
        # id lookup
        dataset_path = os.path.join(mlcomp_root, str(name_or_id))
    else:
        # assume name based lookup
        dataset_path = None
        expected_name_line = "name: " + name_or_id
        for dataset in os.listdir(mlcomp_root):
            metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
            if not os.path.exists(metadata_file):
                continue
            with open(metadata_file) as f:
                for line in f:
                    if line.strip() == expected_name_line:
                        dataset_path = os.path.join(mlcomp_root, dataset)
                        break
        if dataset_path is None:
            raise ValueError("Could not find dataset with metadata line: " +
                             expected_name_line)

    # loading the dataset metadata
    metadata = dict()
    metadata_file = os.path.join(dataset_path, 'metadata')
    if not os.path.exists(metadata_file):
        raise ValueError(dataset_path + ' is not a valid MLComp dataset')
    with open(metadata_file) as f:
        for line in f:
            if ":" in line:
                key, value = line.split(":", 1)
                metadata[key.strip()] = value.strip()

    format = metadata.get('format', 'unknow')
    loader = LOADERS.get(format)
    if loader is None:
        raise ValueError("No loader implemented for format: " + format)
    return loader(dataset_path, metadata, set_=set_, **kwargs)
bsd-3-clause
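A hypothetical call sequence for load_mlcomp above, assuming MLCOMP_DATASETS_HOME points at a folder containing an unzipped MLComp dataset named '20news-18828' (the path, dataset name and printed counts are illustrative, not guaranteed by the source):

import os
from sklearn.datasets import load_mlcomp

os.environ.setdefault('MLCOMP_DATASETS_HOME', '/path/to/mlcomp_datasets')  # illustrative path
train = load_mlcomp('20news-18828', set_='train')
print(len(train.filenames), 'documents in', len(train.target_names), 'categories')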
sangwook236/SWDT
sw_dev/python/rnd/test/machine_learning/sklearn/sklearn_random_forest.py
2
1931
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

# REF [site] >> http://scikit-learn.org/stable/modules/ensemble.html
# REF [site] >> http://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_iris.html

from sklearn import ensemble
from sklearn import datasets
import numpy as np

#---------------------------------------------------------------------

def random_forest_classifier_example():
    X, Y = datasets.make_classification(n_samples=1000, n_features=4, n_informative=2, n_redundant=0, n_repeated=0, n_classes=2, shuffle=False, random_state=0)
    #X, Y = datasets.make_blobs(n_samples=1000, n_features=4, centers=2, cluster_std=1.0, center_box=(-10.0, 10.0), shuffle=False, random_state=0)

    classifier = ensemble.RandomForestClassifier(n_estimators=10, criterion='gini', max_depth=2, random_state=0)
    classifier.fit(X, Y)

    print('Feature importance =', classifier.feature_importances_)

    X_test = [[0, 0, 0, 0]]
    #X_test = X
    print('Prediction =', classifier.predict(X_test))
    print('Prediction (probability) =', classifier.predict_proba(X_test))
    print('Prediction (log probability) =', classifier.predict_log_proba(X_test))

    print('Score =', classifier.score(X, Y))

#---------------------------------------------------------------------

def random_forest_regressor_example():
    X, Y = datasets.make_regression(n_samples=1000, n_features=4, n_informative=2, n_targets=1, shuffle=False, random_state=0)

    regressor = ensemble.RandomForestRegressor(n_estimators=10, criterion='mse', max_depth=2, random_state=0)
    regressor.fit(X, Y)

    print('Feature importances =', regressor.feature_importances_)

    X_test = [[0, 0, 0, 0]]
    #X_test = X
    print('Prediction =', regressor.predict(X_test))

    print('Score =', regressor.score(X, Y))


def main():
    random_forest_classifier_example()
    random_forest_regressor_example()

#--------------------------------------------------------------------

if '__main__' == __name__:
    main()
gpl-3.0
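# Editor's note (hedged sketch): the example above scores the forest on its own
# training data, which overstates accuracy. A held-out split gives a more honest
# estimate; the snippet below uses standard scikit-learn calls, and the sample
# counts and hyperparameters are arbitrary toy settings, not values from the file.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=1000, n_features=4, n_informative=2,
                           n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
                                                    random_state=0)

clf = RandomForestClassifier(n_estimators=100, max_depth=3, random_state=0)
clf.fit(X_train, y_train)
print('Held-out accuracy =', clf.score(X_test, y_test))
print('Feature importances =', clf.feature_importances_)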
hbhdytf/mac
swift/common/middleware/x_profile/html_viewer.py
25
21039
# Copyright (c) 2010-2012 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import cgi import os import random import re import string import tempfile from swift import gettext_ as _ from exceptions import PLOTLIBNotInstalled, ODFLIBNotInstalled,\ NotFoundException, MethodNotAllowed, DataLoadFailure, ProfileException from profile_model import Stats2 PLOTLIB_INSTALLED = True try: import matplotlib # use agg backend for writing to file, not for rendering in a window. # otherwise some platform will complain "no display name and $DISPLAY # environment variable" matplotlib.use('agg') import matplotlib.pyplot as plt except ImportError: PLOTLIB_INSTALLED = False empty_description = """ The default profile of current process or the profile you requested is empty. <input type="submit" name="refresh" value="Refresh"/> """ profile_tmpl = """ <select name="profile"> <option value="current">current</option> <option value="all">all</option> ${profile_list} </select> """ sort_tmpl = """ <select name="sort"> <option value="time">time</option> <option value="cumulative">cumulative</option> <option value="calls">calls</option> <option value="pcalls">pcalls</option> <option value="name">name</option> <option value="file">file</option> <option value="module">module</option> <option value="line">line</option> <option value="nfl">nfl</option> <option value="stdname">stdname</option> </select> """ limit_tmpl = """ <select name="limit"> <option value="-1">all</option> <option value="0.1">10%</option> <option value="0.2">20%</option> <option value="0.3">30%</option> <option value="10">10</option> <option value="20">20</option> <option value="30">30</option> <option value="50">50</option> <option value="100">100</option> <option value="200">200</option> <option value="300">300</option> <option value="400">400</option> <option value="500">500</option> </select> """ fulldirs_tmpl = """ <input type="checkbox" name="fulldirs" value="1" ${fulldir_checked}/> """ mode_tmpl = """ <select name="mode"> <option value="stats">stats</option> <option value="callees">callees</option> <option value="callers">callers</option> </select> """ nfl_filter_tmpl = """ <input type="text" name="nfl_filter" value="${nfl_filter}" placeholder="filename part" /> """ formelements_tmpl = """ <div> <table> <tr> <td> <strong>Profile</strong> <td> <strong>Sort</strong> </td> <td> <strong>Limit</strong> </td> <td> <strong>Full Path</strong> </td> <td> <strong>Filter</strong> </td> <td> </td> <td> <strong>Plot Metric</strong> </td> <td> <strong>Plot Type</strong> <td> </td> <td> <strong>Format</strong> </td> <td> <td> </td> <td> </td> </tr> <tr> <td> ${profile} <td> ${sort} </td> <td> ${limit} </td> <td> ${fulldirs} </td> <td> ${nfl_filter} </td> <td> <input type="submit" name="query" value="query"/> </td> <td> <select name='metric'> <option value='nc'>call count</option> <option value='cc'>primitive call count</option> <option value='tt'>total time</option> <option value='ct'>cumulative time</option> </select> </td> <td> <select name='plottype'> 
<option value='bar'>bar</option> <option value='pie'>pie</option> </select> <td> <input type="submit" name="plot" value="plot"/> </td> <td> <select name='format'> <option value='default'>binary</option> <option value='json'>json</option> <option value='csv'>csv</option> <option value='ods'>ODF.ods</option> </select> </td> <td> <input type="submit" name="download" value="download"/> </td> <td> <input type="submit" name="clear" value="clear"/> </td> </tr> </table> </div> """ index_tmpl = """ <html> <head> <title>profile results</title> <style> <!-- tr.normal { background-color: #ffffff } tr.hover { background-color: #88eeee } //--> </style> </head> <body> <form action="${action}" method="POST"> <div class="form-text"> ${description} </div> <hr /> ${formelements} </form> <pre> ${profilehtml} </pre> </body> </html> """ class HTMLViewer(object): format_dict = {'default': 'application/octet-stream', 'json': 'application/json', 'csv': 'text/csv', 'ods': 'application/vnd.oasis.opendocument.spreadsheet', 'python': 'text/html'} def __init__(self, app_path, profile_module, profile_log): self.app_path = app_path self.profile_module = profile_module self.profile_log = profile_log def _get_param(self, query_dict, key, default=None, multiple=False): value = query_dict.get(key, default) if value is None or value == '': return default if multiple: return value if isinstance(value, list): return eval(value[0]) if isinstance(default, int) else value[0] else: return value def render(self, url, method, path_entry, query_dict, clear_callback): plot = self._get_param(query_dict, 'plot', None) download = self._get_param(query_dict, 'download', None) clear = self._get_param(query_dict, 'clear', None) action = plot or download or clear profile_id = self._get_param(query_dict, 'profile', 'current') sort = self._get_param(query_dict, 'sort', 'time') limit = self._get_param(query_dict, 'limit', -1) fulldirs = self._get_param(query_dict, 'fulldirs', 0) nfl_filter = self._get_param(query_dict, 'nfl_filter', '').strip() metric_selected = self._get_param(query_dict, 'metric', 'cc') plot_type = self._get_param(query_dict, 'plottype', 'bar') download_format = self._get_param(query_dict, 'format', 'default') content = '' # GET /__profile, POST /__profile if len(path_entry) == 2 and method in ['GET', 'POST']: log_files = self.profile_log.get_logfiles(profile_id) if action == 'plot': content, headers = self.plot(log_files, sort, limit, nfl_filter, metric_selected, plot_type) elif action == 'download': content, headers = self.download(log_files, sort, limit, nfl_filter, download_format) else: if action == 'clear': self.profile_log.clear(profile_id) clear_callback and clear_callback() content, headers = self.index_page(log_files, sort, limit, fulldirs, nfl_filter, profile_id, url) # GET /__profile__/all # GET /__profile__/current # GET /__profile__/profile_id # GET /__profile__/profile_id/ # GET /__profile__/profile_id/account.py:50(GETorHEAD) # GET /__profile__/profile_id/swift/proxy/controllers # /account.py:50(GETorHEAD) # with QUERY_STRING: ?format=[default|json|csv|ods] elif len(path_entry) > 2 and method == 'GET': profile_id = path_entry[2] log_files = self.profile_log.get_logfiles(profile_id) pids = self.profile_log.get_all_pids() # return all profiles in a json format by default. 
# GET /__profile__/ if profile_id == '': content = '{"profile_ids": ["' + '","'.join(pids) + '"]}' headers = [('content-type', self.format_dict['json'])] else: if len(path_entry) > 3 and path_entry[3] != '': nfl_filter = '/'.join(path_entry[3:]) if path_entry[-1].find(':0') == -1: nfl_filter = '/' + nfl_filter content, headers = self.download(log_files, sort, -1, nfl_filter, download_format) headers.append(('Access-Control-Allow-Origin', '*')) else: raise MethodNotAllowed(_('method %s is not allowed.') % method) return content, headers def index_page(self, log_files=None, sort='time', limit=-1, fulldirs=0, nfl_filter='', profile_id='current', url='#'): headers = [('content-type', 'text/html')] if len(log_files) == 0: return empty_description, headers try: stats = Stats2(*log_files) except (IOError, ValueError): raise DataLoadFailure(_('Can not load profile data from %s.') % log_files) if not fulldirs: stats.strip_dirs() stats.sort_stats(sort) nfl_filter_esc =\ nfl_filter.replace('(', '\(').replace(')', '\)') amount = [nfl_filter_esc, limit] if nfl_filter_esc else [limit] profile_html = self.generate_stats_html(stats, self.app_path, profile_id, *amount) description = "Profiling information is generated by using\ '%s' profiler." % self.profile_module sort_repl = '<option value="%s">' % sort sort_selected = '<option value="%s" selected>' % sort sort = sort_tmpl.replace(sort_repl, sort_selected) plist = ''.join(['<option value="%s">%s</option>' % (p, p) for p in self.profile_log.get_all_pids()]) profile_element = string.Template(profile_tmpl).substitute( {'profile_list': plist}) profile_repl = '<option value="%s">' % profile_id profile_selected = '<option value="%s" selected>' % profile_id profile_element = profile_element.replace(profile_repl, profile_selected) limit_repl = '<option value="%s">' % limit limit_selected = '<option value="%s" selected>' % limit limit = limit_tmpl.replace(limit_repl, limit_selected) fulldirs_checked = 'checked' if fulldirs else '' fulldirs_element = string.Template(fulldirs_tmpl).substitute( {'fulldir_checked': fulldirs_checked}) nfl_filter_element = string.Template(nfl_filter_tmpl).\ substitute({'nfl_filter': nfl_filter}) form_elements = string.Template(formelements_tmpl).substitute( {'description': description, 'action': url, 'profile': profile_element, 'sort': sort, 'limit': limit, 'fulldirs': fulldirs_element, 'nfl_filter': nfl_filter_element, } ) content = string.Template(index_tmpl).substitute( {'formelements': form_elements, 'action': url, 'description': description, 'profilehtml': profile_html, }) return content, headers def download(self, log_files, sort='time', limit=-1, nfl_filter='', output_format='default'): if len(log_files) == 0: raise NotFoundException(_('no log file found')) try: nfl_esc = nfl_filter.replace('(', '\(').replace(')', '\)') # remove the slash that is intentionally added in the URL # to avoid failure of filtering stats data. 
if nfl_esc.startswith('/'): nfl_esc = nfl_esc[1:] stats = Stats2(*log_files) stats.sort_stats(sort) if output_format == 'python': data = self.format_source_code(nfl_filter) elif output_format == 'json': data = stats.to_json(nfl_esc, limit) elif output_format == 'csv': data = stats.to_csv(nfl_esc, limit) elif output_format == 'ods': data = stats.to_ods(nfl_esc, limit) else: data = stats.print_stats() return data, [('content-type', self.format_dict[output_format])] except ODFLIBNotInstalled as ex: raise ex except Exception as ex: raise ProfileException(_('Data download error: %s') % ex) def plot(self, log_files, sort='time', limit=10, nfl_filter='', metric_selected='cc', plot_type='bar'): if not PLOTLIB_INSTALLED: raise PLOTLIBNotInstalled(_('python-matplotlib not installed.')) if len(log_files) == 0: raise NotFoundException(_('no log file found')) try: stats = Stats2(*log_files) stats.sort_stats(sort) stats_dict = stats.stats __, func_list = stats.get_print_list([nfl_filter, limit]) nfls = [] performance = [] names = {'nc': 'Total Call Count', 'cc': 'Primitive Call Count', 'tt': 'Total Time', 'ct': 'Cumulative Time'} for func in func_list: cc, nc, tt, ct, __ = stats_dict[func] metric = {'cc': cc, 'nc': nc, 'tt': tt, 'ct': ct} nfls.append(func[2]) performance.append(metric[metric_selected]) y_pos = range(len(nfls)) error = [random.random() for __ in y_pos] plt.clf() if plot_type == 'pie': plt.pie(x=performance, explode=None, labels=nfls, autopct='%1.1f%%') else: plt.barh(y_pos, performance, xerr=error, align='center', alpha=0.4) plt.yticks(y_pos, nfls) plt.xlabel(names[metric_selected]) plt.title('Profile Statistics (by %s)' % names[metric_selected]) # plt.gcf().tight_layout(pad=1.2) with tempfile.TemporaryFile() as profile_img: plt.savefig(profile_img, format='png', dpi=300) profile_img.seek(0) data = profile_img.read() return data, [('content-type', 'image/jpg')] except Exception as ex: raise ProfileException(_('plotting results failed due to %s') % ex) def format_source_code(self, nfl): nfls = re.split('[:()]', nfl) file_path = nfls[0] try: lineno = int(nfls[1]) except (TypeError, ValueError, IndexError): lineno = 0 # for security reason, this need to be fixed. 
if not file_path.endswith('.py'): return _('The file type are forbidden to access!') try: data = [] i = 0 with open(file_path) as f: lines = f.readlines() max_width = str(len(str(len(lines)))) fmt = '<span id="L%d" rel="#L%d">%' + max_width\ + 'd|<code>%s</code></span>' for line in lines: l = cgi.escape(line, quote=None) i = i + 1 if i == lineno: fmt2 = '<span id="L%d" style="background-color: \ rgb(127,255,127)">%' + max_width +\ 'd|<code>%s</code></span>' data.append(fmt2 % (i, i, l)) else: data.append(fmt % (i, i, i, l)) data = ''.join(data) except Exception: return _('Can not access the file %s.') % file_path return '<pre>%s</pre>' % data def generate_stats_html(self, stats, app_path, profile_id, *selection): html = [] for filename in stats.files: html.append('<p>%s</p>' % filename) try: for func in stats.top_level: html.append('<p>%s</p>' % func[2]) html.append('%s function calls' % stats.total_calls) if stats.total_calls != stats.prim_calls: html.append("(%d primitive calls)" % stats.prim_calls) html.append('in %.3f seconds' % stats.total_tt) if stats.fcn_list: stat_list = stats.fcn_list[:] msg = "<p>Ordered by: %s</p>" % stats.sort_type else: stat_list = stats.stats.keys() msg = '<p>Random listing order was used</p>' for sel in selection: stat_list, msg = stats.eval_print_amount(sel, stat_list, msg) html.append(msg) html.append('<table style="border-width: 1px">') if stat_list: html.append('<tr><th>#</th><th>Call Count</th>\ <th>Total Time</th><th>Time/Call</th>\ <th>Cumulative Time</th>\ <th>Cumulative Time/Call</th>\ <th>Filename:Lineno(Function)</th>\ <th>JSON</th>\ </tr>') count = 0 for func in stat_list: count = count + 1 html.append('<tr onMouseOver="this.className=\'hover\'"\ onMouseOut="this.className=\'normal\'">\ <td>%d)</td>' % count) cc, nc, tt, ct, __ = stats.stats[func] c = str(nc) if nc != cc: c = c + '/' + str(cc) html.append('<td>%s</td>' % c) html.append('<td>%f</td>' % tt) if nc == 0: html.append('<td>-</td>') else: html.append('<td>%f</td>' % (float(tt) / nc)) html.append('<td>%f</td>' % ct) if cc == 0: html.append('<td>-</td>') else: html.append('<td>%f</td>' % (float(ct) / cc)) nfls = cgi.escape(stats.func_std_string(func)) if nfls.split(':')[0] not in ['', 'profile'] and\ os.path.isfile(nfls.split(':')[0]): html.append('<td><a href="%s/%s%s?format=python#L%d">\ %s</a></td>' % (app_path, profile_id, nfls, func[1], nfls)) else: html.append('<td>%s</td>' % nfls) if not nfls.startswith('/'): nfls = '/' + nfls html.append('<td><a href="%s/%s%s?format=json">\ --></a></td></tr>' % (app_path, profile_id, nfls)) except Exception as ex: html.append("Exception:" % ex.message) return ''.join(html)
apache-2.0
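# Editor's note (hedged sketch): the profiler viewer above assembles its HTML by
# filling ${...} placeholders with string.Template. The standard-library
# behaviour it relies on can be checked in isolation; the template text and
# option values here are invented for the demonstration.
from string import Template

tmpl = Template('<select name="profile">${profile_list}</select>')
options = ''.join('<option value="%s">%s</option>' % (p, p)
                  for p in ['current', 'all', '1234'])
print(tmpl.substitute({'profile_list': options}))
# substitute() raises KeyError for missing keys; safe_substitute() would leave
# unknown ${...} placeholders untouched instead.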
LUTAN/tensorflow
tensorflow/contrib/factorization/python/ops/gmm.py
47
5877
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of Gaussian mixture model (GMM) clustering using tf.Learn.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib import framework from tensorflow.contrib.factorization.python.ops import gmm_ops from tensorflow.contrib.framework.python.framework import checkpoint_utils from tensorflow.contrib.framework.python.ops import variables from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib from tensorflow.python.framework import constant_op from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops.control_flow_ops import with_dependencies def _streaming_sum(scalar_tensor): """Create a sum metric and update op.""" sum_metric = framework.local_variable(constant_op.constant(0.0)) sum_update = sum_metric.assign_add(scalar_tensor) return sum_metric, sum_update class GMM(estimator.Estimator): """An estimator for GMM clustering.""" SCORES = 'scores' ASSIGNMENTS = 'assignments' ALL_SCORES = 'all_scores' def __init__(self, num_clusters, model_dir=None, random_seed=0, params='wmc', initial_clusters='random', covariance_type='full', config=None): """Creates a model for running GMM training and inference. Args: num_clusters: number of clusters to train. model_dir: the directory to save the model results and log files. random_seed: Python integer. Seed for PRNG used to initialize centers. params: Controls which parameters are updated in the training process. Can contain any combination of "w" for weights, "m" for means, and "c" for covars. initial_clusters: specifies how to initialize the clusters for training. See gmm_ops.gmm for the possible values. covariance_type: one of "full", "diag". config: See Estimator """ self._num_clusters = num_clusters self._params = params self._training_initial_clusters = initial_clusters self._covariance_type = covariance_type self._training_graph = None self._random_seed = random_seed super(GMM, self).__init__( model_fn=self._model_builder(), model_dir=model_dir, config=config) def predict_assignments(self, input_fn=None, batch_size=None, outputs=None): """See BaseEstimator.predict.""" results = self.predict(input_fn=input_fn, batch_size=batch_size, outputs=outputs) for result in results: yield result[GMM.ASSIGNMENTS] def score(self, input_fn=None, batch_size=None, steps=None): """Predict total sum of distances to nearest clusters. Note that this function is different from the corresponding one in sklearn which returns the negative of the sum of distances. Args: input_fn: see predict. batch_size: see predict. steps: see predict. Returns: Total sum of distances to nearest clusters. 
""" results = self.evaluate(input_fn=input_fn, batch_size=batch_size, steps=steps) return np.sum(results[GMM.SCORES]) def weights(self): """Returns the cluster weights.""" return checkpoint_utils.load_variable( self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT) def clusters(self): """Returns cluster centers.""" clusters = checkpoint_utils.load_variable( self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE) return np.squeeze(clusters, 1) def covariances(self): """Returns the covariances.""" return checkpoint_utils.load_variable( self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE) def _parse_tensor_or_dict(self, features): if isinstance(features, dict): return array_ops.concat([features[k] for k in sorted(features.keys())], 1) return features def _model_builder(self): """Creates a model function.""" def _model_fn(features, labels, mode): """Model function.""" assert labels is None, labels (all_scores, model_predictions, losses, training_op) = gmm_ops.gmm( self._parse_tensor_or_dict(features), self._training_initial_clusters, self._num_clusters, self._random_seed, self._covariance_type, self._params) incr_step = state_ops.assign_add(variables.get_global_step(), 1) loss = math_ops.reduce_sum(losses) training_op = with_dependencies([training_op, incr_step], loss) predictions = { GMM.ALL_SCORES: all_scores[0], GMM.ASSIGNMENTS: model_predictions[0][0], } eval_metric_ops = { GMM.SCORES: _streaming_sum(loss), } return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions, eval_metric_ops=eval_metric_ops, loss=loss, train_op=training_op) return _model_fn
apache-2.0
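# Editor's note (hedged sketch): tf.contrib (and with it this GMM estimator) was
# removed in TensorFlow 2.x, so the closest maintained equivalent is shown here
# instead. sklearn.mixture.GaussianMixture is an analogue of the same technique,
# not the API used by the file above; the data and cluster count are toy values.
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0.0, 1.0, size=(200, 2)),
               rng.normal(5.0, 1.0, size=(200, 2))])

gmm = GaussianMixture(n_components=2, covariance_type='full', random_state=0)
gmm.fit(X)
print(gmm.weights_)        # mixture weights ("w" in the params string above)
print(gmm.means_)          # cluster centers ("m")
print(gmm.covariances_)    # covariances ("c")
print(gmm.predict(X[:5]))  # hard cluster assignments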
russel1237/scikit-learn
examples/linear_model/plot_ransac.py
250
1673
""" =========================================== Robust linear model estimation using RANSAC =========================================== In this example we see how to robustly fit a linear model to faulty data using the RANSAC algorithm. """ import numpy as np from matplotlib import pyplot as plt from sklearn import linear_model, datasets n_samples = 1000 n_outliers = 50 X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1, n_informative=1, noise=10, coef=True, random_state=0) # Add outlier data np.random.seed(0) X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1)) y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers) # Fit line using all data model = linear_model.LinearRegression() model.fit(X, y) # Robustly fit linear model with RANSAC algorithm model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression()) model_ransac.fit(X, y) inlier_mask = model_ransac.inlier_mask_ outlier_mask = np.logical_not(inlier_mask) # Predict data of estimated models line_X = np.arange(-5, 5) line_y = model.predict(line_X[:, np.newaxis]) line_y_ransac = model_ransac.predict(line_X[:, np.newaxis]) # Compare estimated coefficients print("Estimated coefficients (true, normal, RANSAC):") print(coef, model.coef_, model_ransac.estimator_.coef_) plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers') plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers') plt.plot(line_X, line_y, '-k', label='Linear regressor') plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor') plt.legend(loc='lower right') plt.show()
bsd-3-clause
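# Editor's note (hedged sketch): a compact variant of the RANSAC example above
# that prints the inlier count and the robust coefficient instead of plotting.
# Only documented RANSACRegressor attributes are used; the sample sizes and the
# way outliers are injected are arbitrary choices for illustration.
import numpy as np
from sklearn import datasets, linear_model

X, y = datasets.make_regression(n_samples=200, n_features=1, noise=5,
                                random_state=0)
y[:20] += 200  # corrupt a few samples so they act as outliers

ransac = linear_model.RANSACRegressor(random_state=0)
ransac.fit(X, y)
print('inliers kept:', ransac.inlier_mask_.sum(), 'of', len(y))
print('slope estimated on inliers only:', ransac.estimator_.coef_)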
cainiaocome/scikit-learn
examples/linear_model/plot_sgd_weighted_samples.py
344
1458
""" ===================== SGD: Weighted samples ===================== Plot decision function of a weighted dataset, where the size of points is proportional to its weight. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model # we create 20 points np.random.seed(0) X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)] y = [1] * 10 + [-1] * 10 sample_weight = 100 * np.abs(np.random.randn(20)) # and assign a bigger weight to the last 10 samples sample_weight[:10] *= 10 # plot the weighted data points xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500)) plt.figure() plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9, cmap=plt.cm.bone) ## fit the unweighted model clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100) clf.fit(X, y) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid']) ## fit the weighted model clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100) clf.fit(X, y, sample_weight=sample_weight) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed']) plt.legend([no_weights.collections[0], samples_weights.collections[0]], ["no weights", "with weights"], loc="lower left") plt.xticks(()) plt.yticks(()) plt.show()
bsd-3-clause
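# Editor's note (hedged sketch): the key call in the example above is
# fit(X, y, sample_weight=...). The snippet below isolates that behaviour on a
# tiny synthetic set; the weights and hyperparameters are arbitrary, and it uses
# max_iter, which newer scikit-learn releases accept in place of the n_iter
# argument seen in the file above.
import numpy as np
from sklearn.linear_model import SGDClassifier

rng = np.random.RandomState(0)
X = np.r_[rng.randn(10, 2) + [2, 2], rng.randn(10, 2)]
y = np.r_[np.ones(10), -np.ones(10)]

# Give the last five points ten times the influence of the rest.
w = np.ones(len(y))
w[-5:] = 10.0

clf = SGDClassifier(alpha=0.01, max_iter=1000, random_state=0)
clf.fit(X, y, sample_weight=w)
print(clf.coef_, clf.intercept_)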
jckarter/swift
utils/dev-scripts/scurve_printer.py
37
2875
#!/usr/bin/env python # This is a simple script that takes in an scurve file produced by # csvcolumn_to_scurve and produces a png graph of the scurve. import argparse import csv import matplotlib.pyplot as plt import numpy as np FIELDS = ['N/total', 'New/Old'] def get_data(input_file): global FIELDS for row in csv.DictReader(input_file): yield (float(row[FIELDS[0]]), float(row[FIELDS[1]])) def main(): p = argparse.ArgumentParser() p.add_argument('input_csv_file', type=argparse.FileType('r')) p.add_argument('output_file', type=str) p.add_argument('-y-axis-num-tick-marks', type=int, help='The number of y tick marks to use above/below zero.') p.add_argument('-y-axis-min', type=float, help='Override the min y axis that we use') p.add_argument('-y-axis-max', type=float, help='Override the min y axis that we use') p.add_argument('-title', type=str, help='Title of the graph') p.add_argument('-x-axis-title', type=str, help='The title to use on the x-axis of the graph') p.add_argument('-y-axis-title', type=str, help='The title to use on the x-axis of the graph') args = p.parse_args() data = np.array(list(get_data(args.input_csv_file))) assert np.all(data >= 0) x = data[:, 0] y = data[:, 1] x_axis_title = args.x_axis_title or FIELDS[0] y_axis_title = args.y_axis_title or FIELDS[1] title = args.title or "{} vs {}".format(x_axis_title, y_axis_title) fig, ax = plt.subplots() fig.set_size_inches(18.5, 18.5) fig.suptitle(title, fontsize=20) ax.set_xlabel(x_axis_title, fontsize=20) ax.set_ylabel(y_axis_title, fontsize=20) ax.plot(x, y) ax.scatter(x, y) # To get good bounds, we: # # 1. Re-center our data at 0 by subtracting 1. This will give us the % # difference in between new and old (i.e. (new - old)/old) # # 2. Then we take the maximum absolute delta from zero and round to a # multiple of 5 away from zero. Lets call this value limit. # # 3. We set [min_y, max_y] = [1.0 - limit, 1.0 + limit] recentered_data = y - 1.0 max_magnitude = int(np.max(np.abs(recentered_data)) * 100.0) y_limit = float(((max_magnitude // 5) + 1) * 5) * 0.01 ax.set_xlim(0.0, 1.0) y_min = args.y_axis_min or 1.0 - y_limit y_max = args.y_axis_max or 1.0 + y_limit assert(y_min <= y_max) ax.set_ylim(y_min, y_max) ax.grid(True) ax.xaxis.set_ticks(np.arange(0.0, 1.0, 0.05)) if args.y_axis_num_tick_marks: y_delta = y_max - y_min y_tickmark_frequency = y_delta / float(args.y_axis_num_tick_marks) ax.yaxis.set_ticks(np.arange(y_min, y_max, y_tickmark_frequency)) plt.savefig(args.output_file) if __name__ == "__main__": main()
apache-2.0
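# Editor's note (hedged sketch): the y-axis bound rule in the script above
# ("round the largest deviation from 1.0 up to the next 5%") can be pulled out
# into a small pure function, which makes it easy to unit-test. Same arithmetic
# as the original, no new behaviour; the function name is the editor's choice.
import numpy as np


def symmetric_y_limits(y, step_pct=5):
    """Return (y_min, y_max) centred on 1.0, padded to a multiple of step_pct percent."""
    max_magnitude = int(np.max(np.abs(y - 1.0)) * 100.0)
    limit = float(((max_magnitude // step_pct) + 1) * step_pct) * 0.01
    return 1.0 - limit, 1.0 + limit


print(symmetric_y_limits(np.array([0.93, 1.0, 1.08])))  # -> (0.9, 1.1)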
moutai/scikit-learn
sklearn/metrics/scorer.py
23
13077
""" The :mod:`sklearn.metrics.scorer` submodule implements a flexible interface for model selection and evaluation using arbitrary score functions. A scorer object is a callable that can be passed to :class:`sklearn.model_selection.GridSearchCV` or :func:`sklearn.model_selection.cross_val_score` as the ``scoring`` parameter, to specify how a model should be evaluated. The signature of the call is ``(estimator, X, y)`` where ``estimator`` is the model to be evaluated, ``X`` is the test data and ``y`` is the ground truth labeling (or ``None`` in the case of unsupervised models). """ # Authors: Andreas Mueller <[email protected]> # Lars Buitinck # Arnaud Joly <[email protected]> # License: Simplified BSD from abc import ABCMeta, abstractmethod import numpy as np from . import (r2_score, median_absolute_error, mean_absolute_error, mean_squared_error, accuracy_score, f1_score, roc_auc_score, average_precision_score, precision_score, recall_score, log_loss) from .cluster import adjusted_rand_score from ..utils.multiclass import type_of_target from ..externals import six from ..base import is_regressor class _BaseScorer(six.with_metaclass(ABCMeta, object)): def __init__(self, score_func, sign, kwargs): self._kwargs = kwargs self._score_func = score_func self._sign = sign @abstractmethod def __call__(self, estimator, X, y, sample_weight=None): pass def __repr__(self): kwargs_string = "".join([", %s=%s" % (str(k), str(v)) for k, v in self._kwargs.items()]) return ("make_scorer(%s%s%s%s)" % (self._score_func.__name__, "" if self._sign > 0 else ", greater_is_better=False", self._factory_args(), kwargs_string)) def _factory_args(self): """Return non-default make_scorer arguments for repr.""" return "" class _PredictScorer(_BaseScorer): def __call__(self, estimator, X, y_true, sample_weight=None): """Evaluate predicted target values for X relative to y_true. Parameters ---------- estimator : object Trained estimator to use for scoring. Must have a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to estimator.predict. y_true : array-like Gold standard target values for X. sample_weight : array-like, optional (default=None) Sample weights. Returns ------- score : float Score function applied to prediction of estimator on X. """ y_pred = estimator.predict(X) if sample_weight is not None: return self._sign * self._score_func(y_true, y_pred, sample_weight=sample_weight, **self._kwargs) else: return self._sign * self._score_func(y_true, y_pred, **self._kwargs) class _ProbaScorer(_BaseScorer): def __call__(self, clf, X, y, sample_weight=None): """Evaluate predicted probabilities for X relative to y_true. Parameters ---------- clf : object Trained classifier to use for scoring. Must have a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to clf.predict_proba. y : array-like Gold standard target values for X. These must be class labels, not probabilities. sample_weight : array-like, optional (default=None) Sample weights. Returns ------- score : float Score function applied to prediction of estimator on X. 
""" y_pred = clf.predict_proba(X) if sample_weight is not None: return self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs) else: return self._sign * self._score_func(y, y_pred, **self._kwargs) def _factory_args(self): return ", needs_proba=True" class _ThresholdScorer(_BaseScorer): def __call__(self, clf, X, y, sample_weight=None): """Evaluate decision function output for X relative to y_true. Parameters ---------- clf : object Trained classifier to use for scoring. Must have either a decision_function method or a predict_proba method; the output of that is used to compute the score. X : array-like or sparse matrix Test data that will be fed to clf.decision_function or clf.predict_proba. y : array-like Gold standard target values for X. These must be class labels, not decision function values. sample_weight : array-like, optional (default=None) Sample weights. Returns ------- score : float Score function applied to prediction of estimator on X. """ y_type = type_of_target(y) if y_type not in ("binary", "multilabel-indicator"): raise ValueError("{0} format is not supported".format(y_type)) if is_regressor(clf): y_pred = clf.predict(X) else: try: y_pred = clf.decision_function(X) # For multi-output multi-class estimator if isinstance(y_pred, list): y_pred = np.vstack(p for p in y_pred).T except (NotImplementedError, AttributeError): y_pred = clf.predict_proba(X) if y_type == "binary": y_pred = y_pred[:, 1] elif isinstance(y_pred, list): y_pred = np.vstack([p[:, -1] for p in y_pred]).T if sample_weight is not None: return self._sign * self._score_func(y, y_pred, sample_weight=sample_weight, **self._kwargs) else: return self._sign * self._score_func(y, y_pred, **self._kwargs) def _factory_args(self): return ", needs_threshold=True" def get_scorer(scoring): if isinstance(scoring, six.string_types): try: scorer = SCORERS[scoring] except KeyError: raise ValueError('%r is not a valid scoring value. ' 'Valid options are %s' % (scoring, sorted(SCORERS.keys()))) else: scorer = scoring return scorer def _passthrough_scorer(estimator, *args, **kwargs): """Function that wraps estimator.score""" return estimator.score(*args, **kwargs) def check_scoring(estimator, scoring=None, allow_none=False): """Determine scorer from user options. A TypeError will be thrown if the estimator cannot be scored. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. allow_none : boolean, optional, default: False If no scoring is specified and the estimator has no score function, we can either return None or raise an exception. Returns ------- scoring : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. """ has_scoring = scoring is not None if not hasattr(estimator, 'fit'): raise TypeError("estimator should be an estimator implementing " "'fit' method, %r was passed" % estimator) elif has_scoring: return get_scorer(scoring) elif hasattr(estimator, 'score'): return _passthrough_scorer elif allow_none: return None else: raise TypeError( "If no scoring is specified, the estimator passed should " "have a 'score' method. The estimator %r does not." 
% estimator) def make_scorer(score_func, greater_is_better=True, needs_proba=False, needs_threshold=False, **kwargs): """Make a scorer from a performance metric or loss function. This factory function wraps scoring functions for use in GridSearchCV and cross_val_score. It takes a score function, such as ``accuracy_score``, ``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision`` and returns a callable that scores an estimator's output. Read more in the :ref:`User Guide <scoring>`. Parameters ---------- score_func : callable, Score function (or loss function) with signature ``score_func(y, y_pred, **kwargs)``. greater_is_better : boolean, default=True Whether score_func is a score function (default), meaning high is good, or a loss function, meaning low is good. In the latter case, the scorer object will sign-flip the outcome of the score_func. needs_proba : boolean, default=False Whether score_func requires predict_proba to get probability estimates out of a classifier. needs_threshold : boolean, default=False Whether score_func takes a continuous decision certainty. This only works for binary classification using estimators that have either a decision_function or predict_proba method. For example ``average_precision`` or the area under the roc curve can not be computed using discrete predictions alone. **kwargs : additional arguments Additional parameters to be passed to score_func. Returns ------- scorer : callable Callable object that returns a scalar score; greater is better. Examples -------- >>> from sklearn.metrics import fbeta_score, make_scorer >>> ftwo_scorer = make_scorer(fbeta_score, beta=2) >>> ftwo_scorer make_scorer(fbeta_score, beta=2) >>> from sklearn.model_selection import GridSearchCV >>> from sklearn.svm import LinearSVC >>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]}, ... 
scoring=ftwo_scorer) """ sign = 1 if greater_is_better else -1 if needs_proba and needs_threshold: raise ValueError("Set either needs_proba or needs_threshold to True," " but not both.") if needs_proba: cls = _ProbaScorer elif needs_threshold: cls = _ThresholdScorer else: cls = _PredictScorer return cls(score_func, sign, kwargs) # Standard regression scores r2_scorer = make_scorer(r2_score) mean_squared_error_scorer = make_scorer(mean_squared_error, greater_is_better=False) mean_absolute_error_scorer = make_scorer(mean_absolute_error, greater_is_better=False) median_absolute_error_scorer = make_scorer(median_absolute_error, greater_is_better=False) # Standard Classification Scores accuracy_scorer = make_scorer(accuracy_score) f1_scorer = make_scorer(f1_score) # Score functions that need decision values roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True, needs_threshold=True) average_precision_scorer = make_scorer(average_precision_score, needs_threshold=True) precision_scorer = make_scorer(precision_score) recall_scorer = make_scorer(recall_score) # Score function for probabilistic classification log_loss_scorer = make_scorer(log_loss, greater_is_better=False, needs_proba=True) # Clustering scores adjusted_rand_scorer = make_scorer(adjusted_rand_score) SCORERS = dict(r2=r2_scorer, median_absolute_error=median_absolute_error_scorer, mean_absolute_error=mean_absolute_error_scorer, mean_squared_error=mean_squared_error_scorer, accuracy=accuracy_scorer, roc_auc=roc_auc_scorer, average_precision=average_precision_scorer, log_loss=log_loss_scorer, adjusted_rand_score=adjusted_rand_scorer) for name, metric in [('precision', precision_score), ('recall', recall_score), ('f1', f1_score)]: SCORERS[name] = make_scorer(metric) for average in ['macro', 'micro', 'samples', 'weighted']: qualified_name = '{0}_{1}'.format(name, average) SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average)
bsd-3-clause
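# Editor's note (hedged sketch): a minimal end-to-end use of the make_scorer
# factory defined above, passing the resulting scorer to cross-validation. The
# dataset and estimator are arbitrary illustration choices, not anything the
# module itself prescribes.
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.model_selection import cross_val_score

X, y = load_breast_cancer(return_X_y=True)
ftwo_scorer = make_scorer(fbeta_score, beta=2)  # custom metric wrapped as a scorer

scores = cross_val_score(LogisticRegression(max_iter=5000), X, y,
                         scoring=ftwo_scorer, cv=5)
print(scores.mean())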
diedthreetimes/VCrash
vanet-highway/trace_analyzer.py
1
5814
#! /usr/bin/python # A helper script for analyzing VCrash trace data # # @author: Kyle Benson import argparse import sys from trace import * def create_timesteps(msgs,resolution): #messages should be ordered in time start_time = msgs[0].time end_time = msgs[-1].time times = range(start_time/resolution, end_time/resolution) return times ################################################################################## ################# ARGUMENTS ########################################### ################################################################################## parser = argparse.ArgumentParser(description='A helper script for analyzing VCrash traces and visualizing the data.\ \nExample usage: ./trace_analyzer.py -t ../networkTrace1.csv ../networkTrace2.csv --vehicles ../vehicleTrace1.csv ../vehicleTrace2.csv -l firstLabel secondLabel -p -n',formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--trace-files','-t', '--traces', nargs='+', default=['../networkTrace.csv'],metavar='trace_files', help='file from which to read message trace data') parser.add_argument('--vehicle-files', '--vehicles', nargs='+', default=['../vehicleTrace.csv'],metavar='vehicle_files', help='file from which to read vehicle trace data') parser.add_argument('--penetration','-pen','-p',action='store_true', help='display penetration of message in percent of vehicles that receive it over time') parser.add_argument('--num-packets','-n',action='store_true', help='display total packets sent in each timestep over time') parser.add_argument('--labels','-l',nargs='+', help='names given to data sets contained in specified files') args = parser.parse_args() if len(args.trace_files) != len(args.vehicle_files): print "The number of message and vehicle trace files should be the same!\n" sys.exit(-1) ################################# MAIN #################################### msg_traces = [] for fname in args.trace_files: with open(fname) as f: msg_traces.append(f.readlines()) veh_traces = [] for fname in args.vehicle_files: with open(fname) as f: veh_traces.append(f.readlines()) #print "Total # of trace lines: %d" % len() messages = [] for traces in msg_traces: forwarded = [Message(x) for x in traces if x.startswith('Packet forwarded.')] ignored = [Message(x) for x in traces if x.startswith('Ignoring packet.')] created = [Message(x) for x in traces if x.startswith('Packet created.')] all_msgs = [Message(x) for x in traces] messages.append(MessageTraceData(all_msgs,forwarded,ignored,created)) #crash_info = [x.created[0].time for x in messages] vehicles = [] for traces in veh_traces: #for (i,traces) in enumerate(veh_traces): vehs = {} for v in (Vehicle(x.strip()) for x in traces): #for v in (Vehicle(x.strip()) for x in traces if x.startswith(str(crash_times[i]))): #within_distance = False #if sqrt( if v.id not in vehs:# and within_distance: vehs[v.id] = 1 vehicles.append(vehs) nvehicles = [len(v) for v in vehicles] #print "there were %d forwards, %d ignores, and %d vehicles total" % (len(forwards), len(ignores), len(vehicles)) ################################### PLOTS ################################ nplots = (0 if args.penetration is False else 1) + (0 if args.num_packets is False else len(messages)) if nplots == 0: print "No plots requested." 
sys.exit(0) from matplotlib.pyplot import subplot,plot,bar,legend import matplotlib.pyplot as plt #from math import ceil,sqrt #try for smart subplot arrangements if nplots > 3: nrows = int(sqrt(nplots)+0.5) ncols = ceil(nplots/float(nrows)) else: nrows = 1 ncols = nplots next_axes = 1 if args.penetration: data_to_plot = [] for (i,msgs) in enumerate(messages): vehs_notified = {} #vehicles always forward message once they see it msgs = msgs.forwarded resolution = 100000000 #0.1 second resolution times = create_timesteps(msgs,resolution) percentages = [] for t in times: while msgs[0].time < t*resolution: vehs_notified[msgs[0].id] = vehs_notified.get(msgs[0].id, 0) + 1 msgs = msgs[1:] percentages.append((len(vehs_notified) +1)/float(nvehicles[i]) *100) min_time = times[0] times = [(t-min_time)/10 for t in times] data_to_plot.append(times) data_to_plot.append(percentages) subplot(nrows,ncols,next_axes) plot(*data_to_plot) if args.labels is not None: legend(args.labels, loc='lower right') plt.title("Message coverage over time") plt.xlabel("Time offset from crash event (sec)") plt.ylabel("% of vehicles that have received the message") plt.plot() next_axes += 1 if args.num_packets: for (i,msgs) in enumerate(messages): msgs = sorted(msgs.forwarded + msgs.created) resolution = 1000000000 #second resolution times = create_timesteps(msgs,resolution) npackets = [] for t in times: npacs = 0 while msgs[0].time < t*resolution: npacs += 1 msgs = msgs[1:] npackets.append(npacs) min_time = times[0] times = [t-min_time for t in times] subplot(nrows,ncols,next_axes) bar(times, npackets) title = "Number of packets sent" if args.labels is not None: title = title + ': ' + args.labels[i] plt.title(title) plt.xlabel("Time since crash event (sec)") plt.ylabel("Packets sent per second") plt.plot() next_axes += 1 plt.show()
gpl-2.0
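# Editor's note (hedged sketch): the grid-arrangement branch in the script above
# calls sqrt() and ceil() while its "from math import ceil,sqrt" line is
# commented out, so requesting more than three plots would raise a NameError.
# The helper below reproduces the same arrangement logic with the import in
# place; it is a suggested fix, not the author's code.
from math import ceil, sqrt


def subplot_grid(nplots):
    """Pick a near-square (nrows, ncols) arrangement for nplots subplots."""
    if nplots > 3:
        nrows = int(sqrt(nplots) + 0.5)
        ncols = int(ceil(nplots / float(nrows)))
    else:
        nrows, ncols = 1, nplots
    return nrows, ncols


print(subplot_grid(2))  # (1, 2)
print(subplot_grid(5))  # (2, 3)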
anderspitman/scikit-bio
skbio/io/format/tests/test_blast6.py
2
6174
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import unittest import pandas as pd import numpy as np from skbio.util import get_data_path, assert_data_frame_almost_equal from skbio.io.format.blast6 import _blast6_to_data_frame class TestBlast6Reader(unittest.TestCase): def test_default_valid_single_line(self): fp = get_data_path('blast6_default_single_line') df = _blast6_to_data_frame(fp, default_columns=True) exp = pd.DataFrame([['query1', 'subject2', 75.0, 8.0, 2.0, 0.0, 1.0, 8.0, 2.0, 9.0, 0.06, 11.5]], columns=['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']) assert_data_frame_almost_equal(df, exp) def test_default_valid_multi_line(self): fp = get_data_path('blast6_default_multi_line') df = _blast6_to_data_frame(fp, default_columns=True) exp = pd.DataFrame([['query1', 'subject2', 100.00, 8.0, 0.0, 0.0, 1.0, 8.0, 3.0, 10.0, 9e-05, 16.9], ['query1', 'subject2', 75.00, 8.0, 2.0, 0.0, 1.0, 8.0, 2.0, 9.0, 0.060, 11.5], ['query2', 'subject1', 71.43, 7.0, 2.0, 0.0, 1.0, 7.0, 1.0, 7.0, 0.044, 11.9]], columns=['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']) assert_data_frame_almost_equal(df, exp) def test_custom_valid_single_line(self): fp = get_data_path('blast6_custom_single_line') df = _blast6_to_data_frame(fp, columns=['qacc', 'qseq', 'btop', 'sframe', 'ppos', 'positive', 'gaps']) exp = pd.DataFrame([['query1', 'PAAWWWWW', 8.0, 1.0, 100.00, 8.0, 0.0]], columns=['qacc', 'qseq', 'btop', 'sframe', 'ppos', 'positive', 'gaps']) assert_data_frame_almost_equal(df, exp) def test_custom_valid_multi_line(self): fp = get_data_path('blast6_custom_multi_line') df = _blast6_to_data_frame(fp, columns=['sacc', 'score', 'gapopen', 'qcovs', 'sblastnames', 'sallacc', 'qaccver']) exp = pd.DataFrame([['subject2', 32.0, 0.0, 100.0, np.nan, 'subject2', 'query1'], ['subject2', 18.0, 0.0, 100.0, np.nan, 'subject2', 'query1'], ['subject1', 19.0, 0.0, 70.0, np.nan, 'subject1', 'query2']], columns=['sacc', 'score', 'gapopen', 'qcovs', 'sblastnames', 'sallacc', 'qaccver']) exp['sblastnames'] = exp['sblastnames'].astype(object) assert_data_frame_almost_equal(df, exp) def test_valid_nan_handling(self): fp = get_data_path('blast6_custom_mixed_nans') df = _blast6_to_data_frame(fp, columns=['qacc', 'qseq', 'btop', 'sframe', 'ppos', 'positive', 'gaps']) exp = pd.DataFrame([[np.nan, 'PAAWWWWW', 8.0, 1.0, 100.00, np.nan, 0.0], ['query1', np.nan, 8.0, 1.0, np.nan, 8.0, 0.0]], columns=['qacc', 'qseq', 'btop', 'sframe', 'ppos', 'positive', 'gaps']) assert_data_frame_almost_equal(df, exp) def test_valid_minimal(self): fp = get_data_path('blast6_custom_minimal') df = _blast6_to_data_frame(fp, columns=['sacc']) exp = pd.DataFrame([['subject2']], columns=['sacc']) assert_data_frame_almost_equal(df, exp) def test_custom_and_default_passed_error(self): fp = get_data_path('blast6_default_single_line') with self.assertRaisesRegex(ValueError, "`columns` and `default_columns`"): _blast6_to_data_frame(fp, columns=['qseqid'], default_columns=True) def test_no_columns_passed_error(self): fp = get_data_path('blast6_default_single_line') with self.assertRaisesRegex(ValueError, "Either `columns` or 
`default_columns`"): _blast6_to_data_frame(fp) def test_wrong_amount_of_columns_error(self): fp = get_data_path('blast6_invalid_number_of_columns') with self.assertRaisesRegex( ValueError, "Specified number of columns \(12\).*\(10\)"): _blast6_to_data_frame(fp, default_columns=True) def test_different_data_in_same_column(self): fp = get_data_path('blast6_invalid_type_in_column') with self.assertRaises(ValueError): _blast6_to_data_frame(fp, default_columns=True) def test_wrong_column_name_error(self): fp = get_data_path('blast6_default_single_line') with self.assertRaisesRegex(ValueError, "Unrecognized column.*'abcd'"): _blast6_to_data_frame(fp, columns=['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'abcd', 'bitscore']) if __name__ == '__main__': unittest.main()
bsd-3-clause
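# Editor's note (hedged sketch): the reader exercised by the tests above parses
# tab-separated BLAST+ "outfmt 6" text. A bare-bones version of that parse,
# using only pandas, looks roughly like this; the column list mirrors the
# default columns asserted in the tests, and the inline data row is made up.
import io

import pandas as pd

DEFAULT_COLUMNS = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch',
                   'gapopen', 'qstart', 'qend', 'sstart', 'send',
                   'evalue', 'bitscore']

raw = "query1\tsubject2\t75.0\t8\t2\t0\t1\t8\t2\t9\t0.06\t11.5\n"
df = pd.read_csv(io.StringIO(raw), sep='\t', names=DEFAULT_COLUMNS,
                 na_values='N/A')
print(df)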
jakobworldpeace/scikit-learn
sklearn/base.py
13
19725
"""Base classes for all estimators.""" # Author: Gael Varoquaux <[email protected]> # License: BSD 3 clause import copy import warnings import numpy as np from scipy import sparse from .externals import six from .utils.fixes import signature from . import __version__ ############################################################################## def _first_and_last_element(arr): """Returns first and last element of numpy array or sparse matrix.""" if isinstance(arr, np.ndarray) or hasattr(arr, 'data'): # numpy array or sparse matrix with .data attribute data = arr.data if sparse.issparse(arr) else arr return data.flat[0], data.flat[-1] else: # Sparse matrices without .data attribute. Only dok_matrix at # the time of writing, in this case indexing is fast return arr[0, 0], arr[-1, -1] def clone(estimator, safe=True): """Constructs a new estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It yields a new estimator with the same parameters that has not been fit on any data. Parameters ---------- estimator : estimator object, or list, tuple or set of objects The estimator or group of estimators to be cloned safe : boolean, optional If safe is false, clone will fall back to a deepcopy on objects that are not estimators. """ estimator_type = type(estimator) # XXX: not handling dictionaries if estimator_type in (list, tuple, set, frozenset): return estimator_type([clone(e, safe=safe) for e in estimator]) elif not hasattr(estimator, 'get_params'): if not safe: return copy.deepcopy(estimator) else: raise TypeError("Cannot clone object '%s' (type %s): " "it does not seem to be a scikit-learn estimator " "as it does not implement a 'get_params' methods." % (repr(estimator), type(estimator))) klass = estimator.__class__ new_object_params = estimator.get_params(deep=False) for name, param in six.iteritems(new_object_params): new_object_params[name] = clone(param, safe=False) new_object = klass(**new_object_params) params_set = new_object.get_params(deep=False) # quick sanity check of the parameters of the clone for name in new_object_params: param1 = new_object_params[name] param2 = params_set[name] if param1 is param2: # this should always happen continue if isinstance(param1, np.ndarray): # For most ndarrays, we do not test for complete equality if not isinstance(param2, type(param1)): equality_test = False elif (param1.ndim > 0 and param1.shape[0] > 0 and isinstance(param2, np.ndarray) and param2.ndim > 0 and param2.shape[0] > 0): equality_test = ( param1.shape == param2.shape and param1.dtype == param2.dtype and (_first_and_last_element(param1) == _first_and_last_element(param2)) ) else: equality_test = np.all(param1 == param2) elif sparse.issparse(param1): # For sparse matrices equality doesn't work if not sparse.issparse(param2): equality_test = False elif param1.size == 0 or param2.size == 0: equality_test = ( param1.__class__ == param2.__class__ and param1.size == 0 and param2.size == 0 ) else: equality_test = ( param1.__class__ == param2.__class__ and (_first_and_last_element(param1) == _first_and_last_element(param2)) and param1.nnz == param2.nnz and param1.shape == param2.shape ) else: # fall back on standard equality equality_test = param1 == param2 if equality_test: warnings.warn("Estimator %s modifies parameters in __init__." " This behavior is deprecated as of 0.18 and " "support for this behavior will be removed in 0.20." 
% type(estimator).__name__, DeprecationWarning) else: raise RuntimeError('Cannot clone object %s, as the constructor ' 'does not seem to set parameter %s' % (estimator, name)) return new_object ############################################################################### def _pprint(params, offset=0, printer=repr): """Pretty print the dictionary 'params' Parameters ---------- params : dict The dictionary to pretty print offset : int The offset in characters to add at the begin of each line. printer : callable The function to convert entries to strings, typically the builtin str or repr """ # Do a multi-line justified repr: options = np.get_printoptions() np.set_printoptions(precision=5, threshold=64, edgeitems=2) params_list = list() this_line_length = offset line_sep = ',\n' + (1 + offset // 2) * ' ' for i, (k, v) in enumerate(sorted(six.iteritems(params))): if type(v) is float: # use str for representing floating point numbers # this way we get consistent representation across # architectures and versions. this_repr = '%s=%s' % (k, str(v)) else: # use repr of the rest this_repr = '%s=%s' % (k, printer(v)) if len(this_repr) > 500: this_repr = this_repr[:300] + '...' + this_repr[-100:] if i > 0: if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr): params_list.append(line_sep) this_line_length = len(line_sep) else: params_list.append(', ') this_line_length += 2 params_list.append(this_repr) this_line_length += len(this_repr) np.set_printoptions(**options) lines = ''.join(params_list) # Strip trailing space to avoid nightmare in doctests lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n')) return lines ############################################################################### class BaseEstimator(object): """Base class for all estimators in scikit-learn Notes ----- All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). """ @classmethod def _get_param_names(cls): """Get parameter names for the estimator""" # fetch the constructor or the original constructor before # deprecation wrapping if any init = getattr(cls.__init__, 'deprecated_original', cls.__init__) if init is object.__init__: # No explicit constructor to introspect return [] # introspect the constructor arguments to find the model parameters # to represent init_signature = signature(init) # Consider the constructor parameters excluding 'self' parameters = [p for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD] for p in parameters: if p.kind == p.VAR_POSITIONAL: raise RuntimeError("scikit-learn estimators should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s with constructor %s doesn't " " follow this convention." % (cls, init_signature)) # Extract and sort argument names excluding 'self' return sorted([p.name for p in parameters]) def get_params(self, deep=True): """Get parameters for this estimator. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ out = dict() for key in self._get_param_names(): # We need deprecation warnings to always be on in order to # catch deprecated param values. # This is set in utils/__init__.py but it gets overwritten # when running under python3 somehow. 
warnings.simplefilter("always", DeprecationWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if len(w) and w[0].category == DeprecationWarning: # if the parameter is deprecated, don't show it continue finally: warnings.filters.pop(0) # XXX: should we rather test if instance of estimator? if deep and hasattr(value, 'get_params'): deep_items = value.get_params().items() out.update((key + '__' + k, val) for k, val in deep_items) out[key] = value return out def set_params(self, **params): """Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Returns ------- self """ if not params: # Simple optimisation to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) for key, value in six.iteritems(params): split = key.split('__', 1) if len(split) > 1: # nested objects case name, sub_name = split if name not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (name, self)) sub_object = valid_params[name] sub_object.set_params(**{sub_name: value}) else: # simple objects case if key not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (key, self.__class__.__name__)) setattr(self, key, value) return self def __repr__(self): class_name = self.__class__.__name__ return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False), offset=len(class_name),),) def __getstate__(self): try: state = super(BaseEstimator, self).__getstate__() except AttributeError: state = self.__dict__.copy() if type(self).__module__.startswith('sklearn.'): return dict(state.items(), _sklearn_version=__version__) else: return state def __setstate__(self, state): if type(self).__module__.startswith('sklearn.'): pickle_version = state.pop("_sklearn_version", "pre-0.18") if pickle_version != __version__: warnings.warn( "Trying to unpickle estimator {0} from version {1} when " "using version {2}. This might lead to breaking code or " "invalid results. Use at your own risk.".format( self.__class__.__name__, pickle_version, __version__), UserWarning) try: super(BaseEstimator, self).__setstate__(state) except AttributeError: self.__dict__.update(state) ############################################################################### class ClassifierMixin(object): """Mixin class for all classifiers in scikit-learn.""" _estimator_type = "classifier" def score(self, X, y, sample_weight=None): """Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- score : float Mean accuracy of self.predict(X) wrt. y. 
""" from .metrics import accuracy_score return accuracy_score(y, self.predict(X), sample_weight=sample_weight) ############################################################################### class RegressorMixin(object): """Mixin class for all regression estimators in scikit-learn.""" _estimator_type = "regressor" def score(self, X, y, sample_weight=None): """Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the regression sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual sum of squares ((y_true - y_true.mean()) ** 2).sum(). Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters ---------- X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- score : float R^2 of self.predict(X) wrt. y. """ from .metrics import r2_score return r2_score(y, self.predict(X), sample_weight=sample_weight, multioutput='variance_weighted') ############################################################################### class ClusterMixin(object): """Mixin class for all cluster estimators in scikit-learn.""" _estimator_type = "clusterer" def fit_predict(self, X, y=None): """Performs clustering on X and returns cluster labels. Parameters ---------- X : ndarray, shape (n_samples, n_features) Input data. Returns ------- y : ndarray, shape (n_samples,) cluster labels """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm self.fit(X) return self.labels_ class BiclusterMixin(object): """Mixin class for all bicluster estimators in scikit-learn""" @property def biclusters_(self): """Convenient way to get row and column indicators together. Returns the ``rows_`` and ``columns_`` members. """ return self.rows_, self.columns_ def get_indices(self, i): """Row and column indices of the i'th bicluster. Only works if ``rows_`` and ``columns_`` attributes exist. Returns ------- row_ind : np.array, dtype=np.intp Indices of rows in the dataset that belong to the bicluster. col_ind : np.array, dtype=np.intp Indices of columns in the dataset that belong to the bicluster. """ rows = self.rows_[i] columns = self.columns_[i] return np.nonzero(rows)[0], np.nonzero(columns)[0] def get_shape(self, i): """Shape of the i'th bicluster. Returns ------- shape : (int, int) Number of rows and columns (resp.) in the bicluster. """ indices = self.get_indices(i) return tuple(len(i) for i in indices) def get_submatrix(self, i, data): """Returns the submatrix corresponding to bicluster `i`. Works with sparse matrices. Only works if ``rows_`` and ``columns_`` attributes exist. """ from .utils.validation import check_array data = check_array(data, accept_sparse='csr') row_ind, col_ind = self.get_indices(i) return data[row_ind[:, np.newaxis], col_ind] ############################################################################### class TransformerMixin(object): """Mixin class for all transformers in scikit-learn.""" def fit_transform(self, X, y=None, **fit_params): """Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. 
Parameters ---------- X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns ------- X_new : numpy array of shape [n_samples, n_features_new] Transformed array. """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm if y is None: # fit method of arity 1 (unsupervised transformation) return self.fit(X, **fit_params).transform(X) else: # fit method of arity 2 (supervised transformation) return self.fit(X, y, **fit_params).transform(X) class DensityMixin(object): """Mixin class for all density estimators in scikit-learn.""" _estimator_type = "DensityEstimator" def score(self, X, y=None): """Returns the score of the model on the data X Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- score : float """ pass ############################################################################### class MetaEstimatorMixin(object): """Mixin class for all meta estimators in scikit-learn.""" # this is just a tag for the moment ############################################################################### def is_classifier(estimator): """Returns True if the given estimator is (probably) a classifier.""" return getattr(estimator, "_estimator_type", None) == "classifier" def is_regressor(estimator): """Returns True if the given estimator is (probably) a regressor.""" return getattr(estimator, "_estimator_type", None) == "regressor"
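The '<component>__<parameter>' routing that the set_params docstring above describes can be illustrated with a small self-contained sketch. The Scaler and TwoStep classes below are hypothetical stand-ins introduced only for this illustration, not scikit-learn estimators; they reproduce just the key-splitting idea.

# Minimal sketch (assumed names, not scikit-learn code) of how a key such as
# 'scaler__factor' is routed to a nested sub-object, as set_params does above.
class Scaler(object):
    def __init__(self, factor=1.0):
        self.factor = factor

    def get_params(self, deep=True):
        return {'factor': self.factor}

    def set_params(self, **params):
        for key, value in params.items():
            setattr(self, key, value)
        return self


class TwoStep(object):
    def __init__(self, scaler):
        self.scaler = scaler

    def get_params(self, deep=True):
        params = {'scaler': self.scaler}
        if deep:
            # prefix the sub-object's parameters, mirroring get_params(deep=True)
            for k, v in self.scaler.get_params().items():
                params['scaler__' + k] = v
        return params

    def set_params(self, **params):
        for key, value in params.items():
            if '__' in key:
                # nested case: split once and delegate to the sub-object
                name, sub_name = key.split('__', 1)
                getattr(self, name).set_params(**{sub_name: value})
            else:
                setattr(self, key, value)
        return self


est = TwoStep(Scaler(factor=2.0))
est.set_params(scaler__factor=0.5)
assert est.scaler.factor == 0.5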
bsd-3-clause
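As a quick numerical check of the R^2 definition quoted in RegressorMixin.score, the snippet below computes 1 - u/v by hand (u the residual sum of squares of the predictions, v the total sum of squares around the mean) and compares it with sklearn.metrics.r2_score. The target and prediction values are made up for illustration.

import numpy as np
from sklearn.metrics import r2_score

y_true = np.array([3.0, -0.5, 2.0, 7.0])   # made-up targets
y_pred = np.array([2.5, 0.0, 2.0, 8.0])    # made-up predictions

u = ((y_true - y_pred) ** 2).sum()          # residual sum of squares
v = ((y_true - y_true.mean()) ** 2).sum()   # total sum of squares
manual_r2 = 1.0 - u / v

assert np.isclose(manual_r2, r2_score(y_true, y_pred))
print(manual_r2)   # approximately 0.9486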
zimmermegan/MARDA
nltk-3.0.3/nltk/parse/transitionparser.py
5
31354
# Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers # # Author: Long Duong <[email protected]> # # Copyright (C) 2001-2015 NLTK Project # URL: <http://nltk.org/> # For license information, see LICENSE.TXT from __future__ import absolute_import from __future__ import division from __future__ import print_function import tempfile import pickle from os import remove from copy import deepcopy from operator import itemgetter try: from numpy import array from scipy import sparse from sklearn.datasets import load_svmlight_file from sklearn import svm except ImportError: pass from nltk.parse import ParserI, DependencyGraph, DependencyEvaluator class Configuration(object): """ Class for holding configuration which is the partial analysis of the input sentence. The transition based parser aims at finding set of operators that transfer the initial configuration to the terminal configuration. The configuration includes: - Stack: for storing partially proceeded words - Buffer: for storing remaining input words - Set of arcs: for storing partially built dependency tree This class also provides a method to represent a configuration as list of features. """ def __init__(self, dep_graph): """ :param dep_graph: the representation of an input in the form of dependency graph. :type dep_graph: DependencyGraph where the dependencies are not specified. """ # dep_graph.nodes contain list of token for a sentence self.stack = [0] # The root element self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer self.arcs = [] # empty set of arc self._tokens = dep_graph.nodes self._max_address = len(self.buffer) def __str__(self): return 'Stack : ' + \ str(self.stack) + ' Buffer : ' + str(self.buffer) + ' Arcs : ' + str(self.arcs) def _check_informative(self, feat, flag=False): """ Check whether a feature is informative The flag control whether "_" is informative or not """ if feat is None: return False if feat == '': return False if flag is False: if feat == '_': return False return True def extract_features(self): """ Extract the set of features for the current configuration. Implement standard features as describe in Table 3.2 (page 31) in Dependency Parsing book by Sandra Kubler, Ryan McDonal, Joakim Nivre. Please note that these features are very basic. :return: list(str) """ result = [] # Todo : can come up with more complicated features set for better # performance. 
if len(self.stack) > 0: # Stack 0 stack_idx0 = self.stack[len(self.stack) - 1] token = self._tokens[stack_idx0] if self._check_informative(token['word'], True): result.append('STK_0_FORM_' + token['word']) if 'lemma' in token and self._check_informative(token['lemma']): result.append('STK_0_LEMMA_' + token['lemma']) if self._check_informative(token['tag']): result.append('STK_0_POS_' + token['tag']) if 'feats' in token and self._check_informative(token['feats']): feats = token['feats'].split("|") for feat in feats: result.append('STK_0_FEATS_' + feat) # Stack 1 if len(self.stack) > 1: stack_idx1 = self.stack[len(self.stack) - 2] token = self._tokens[stack_idx1] if self._check_informative(token['tag']): result.append('STK_1_POS_' + token['tag']) # Left most, right most dependency of stack[0] left_most = 1000000 right_most = -1 dep_left_most = '' dep_right_most = '' for (wi, r, wj) in self.arcs: if wi == stack_idx0: if (wj > wi) and (wj > right_most): right_most = wj dep_right_most = r if (wj < wi) and (wj < left_most): left_most = wj dep_left_most = r if self._check_informative(dep_left_most): result.append('STK_0_LDEP_' + dep_left_most) if self._check_informative(dep_right_most): result.append('STK_0_RDEP_' + dep_right_most) # Check Buffered 0 if len(self.buffer) > 0: # Buffer 0 buffer_idx0 = self.buffer[0] token = self._tokens[buffer_idx0] if self._check_informative(token['word'], True): result.append('BUF_0_FORM_' + token['word']) if 'lemma' in token and self._check_informative(token['lemma']): result.append('BUF_0_LEMMA_' + token['lemma']) if self._check_informative(token['tag']): result.append('BUF_0_POS_' + token['tag']) if 'feats' in token and self._check_informative(token['feats']): feats = token['feats'].split("|") for feat in feats: result.append('BUF_0_FEATS_' + feat) # Buffer 1 if len(self.buffer) > 1: buffer_idx1 = self.buffer[1] token = self._tokens[buffer_idx1] if self._check_informative(token['word'], True): result.append('BUF_1_FORM_' + token['word']) if self._check_informative(token['tag']): result.append('BUF_1_POS_' + token['tag']) if len(self.buffer) > 2: buffer_idx2 = self.buffer[2] token = self._tokens[buffer_idx2] if self._check_informative(token['tag']): result.append('BUF_2_POS_' + token['tag']) if len(self.buffer) > 3: buffer_idx3 = self.buffer[3] token = self._tokens[buffer_idx3] if self._check_informative(token['tag']): result.append('BUF_3_POS_' + token['tag']) # Left most, right most dependency of stack[0] left_most = 1000000 right_most = -1 dep_left_most = '' dep_right_most = '' for (wi, r, wj) in self.arcs: if wi == buffer_idx0: if (wj > wi) and (wj > right_most): right_most = wj dep_right_most = r if (wj < wi) and (wj < left_most): left_most = wj dep_left_most = r if self._check_informative(dep_left_most): result.append('BUF_0_LDEP_' + dep_left_most) if self._check_informative(dep_right_most): result.append('BUF_0_RDEP_' + dep_right_most) return result class Transition(object): """ This class defines a set of transition which is applied to a configuration to get another configuration Note that for different parsing algorithm, the transition is different. """ # Define set of transitions LEFT_ARC = 'LEFTARC' RIGHT_ARC = 'RIGHTARC' SHIFT = 'SHIFT' REDUCE = 'REDUCE' def __init__(self, alg_option): """ :param alg_option: the algorithm option of this parser. 
Currently support `arc-standard` and `arc-eager` algorithm :type alg_option: str """ self._algo = alg_option if alg_option not in [ TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER]: raise ValueError(" Currently we only support %s and %s " % (TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER)) def left_arc(self, conf, relation): """ Note that the algorithm for left-arc is quite similar except for precondition for both arc-standard and arc-eager :param configuration: is the current configuration :return : A new configuration or -1 if the pre-condition is not satisfied """ if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0): return -1 if conf.buffer[0] == 0: # here is the Root element return -1 idx_wi = conf.stack[len(conf.stack) - 1] flag = True if self._algo == TransitionParser.ARC_EAGER: for (idx_parent, r, idx_child) in conf.arcs: if idx_child == idx_wi: flag = False if flag: conf.stack.pop() idx_wj = conf.buffer[0] conf.arcs.append((idx_wj, relation, idx_wi)) else: return -1 def right_arc(self, conf, relation): """ Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager :param configuration: is the current configuration :return : A new configuration or -1 if the pre-condition is not satisfied """ if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0): return -1 if self._algo == TransitionParser.ARC_STANDARD: idx_wi = conf.stack.pop() idx_wj = conf.buffer[0] conf.buffer[0] = idx_wi conf.arcs.append((idx_wi, relation, idx_wj)) else: # arc-eager idx_wi = conf.stack[len(conf.stack) - 1] idx_wj = conf.buffer.pop(0) conf.stack.append(idx_wj) conf.arcs.append((idx_wi, relation, idx_wj)) def reduce(self, conf): """ Note that the algorithm for reduce is only available for arc-eager :param configuration: is the current configuration :return : A new configuration or -1 if the pre-condition is not satisfied """ if self._algo != TransitionParser.ARC_EAGER: return -1 if len(conf.stack) <= 0: return -1 idx_wi = conf.stack[len(conf.stack) - 1] flag = False for (idx_parent, r, idx_child) in conf.arcs: if idx_child == idx_wi: flag = True if flag: conf.stack.pop() # reduce it else: return -1 def shift(self, conf): """ Note that the algorithm for shift is the SAME for arc-standard and arc-eager :param configuration: is the current configuration :return : A new configuration or -1 if the pre-condition is not satisfied """ if len(conf.buffer) <= 0: return -1 idx_wi = conf.buffer.pop(0) conf.stack.append(idx_wi) class TransitionParser(ParserI): """ Class for transition based parser. Implement 2 algorithms which are "arc-standard" and "arc-eager" """ ARC_STANDARD = 'arc-standard' ARC_EAGER = 'arc-eager' def __init__(self, algorithm): """ :param algorithm: the algorithm option of this parser. 
Currently support `arc-standard` and `arc-eager` algorithm :type algorithm: str """ if not(algorithm in [self.ARC_STANDARD, self.ARC_EAGER]): raise ValueError(" Currently we only support %s and %s " % (self.ARC_STANDARD, self.ARC_EAGER)) self._algorithm = algorithm self._dictionary = {} self._transition = {} self._match_transition = {} def _get_dep_relation(self, idx_parent, idx_child, depgraph): p_node = depgraph.nodes[idx_parent] c_node = depgraph.nodes[idx_child] if c_node['word'] is None: return None # Root word if c_node['head'] == p_node['address']: return c_node['rel'] else: return None def _convert_to_binary_features(self, features): """ :param features: list of feature string which is needed to convert to binary features :type features: list(str) :return : string of binary features in libsvm format which is 'featureID:value' pairs """ unsorted_result = [] for feature in features: self._dictionary.setdefault(feature, len(self._dictionary)) unsorted_result.append(self._dictionary[feature]) # Default value of each feature is 1.0 return ' '.join(str(featureID) + ':1.0' for featureID in sorted(unsorted_result)) def _is_projective(self, depgraph): arc_list = [] for key in depgraph.nodes: node = depgraph.nodes[key] if 'head' in node: childIdx = node['address'] parentIdx = node['head'] if parentIdx is not None: arc_list.append((parentIdx, childIdx)) for (parentIdx, childIdx) in arc_list: # Ensure that childIdx < parentIdx if childIdx > parentIdx: temp = childIdx childIdx = parentIdx parentIdx = temp for k in range(childIdx + 1, parentIdx): for m in range(len(depgraph.nodes)): if (m < childIdx) or (m > parentIdx): if (k, m) in arc_list: return False if (m, k) in arc_list: return False return True def _write_to_file(self, key, binary_features, input_file): """ write the binary features to input file and update the transition dictionary """ self._transition.setdefault(key, len(self._transition) + 1) self._match_transition[self._transition[key]] = key input_str = str(self._transition[key]) + ' ' + binary_features + '\n' input_file.write(input_str.encode('utf-8')) def _create_training_examples_arc_std(self, depgraphs, input_file): """ Create the training example in the libsvm format and write it to the input_file. Reference : Page 32, Chapter 3. 
Dependency Parsing by Sandra Kubler, Ryan McDonal and Joakim Nivre (2009) """ operation = Transition(self.ARC_STANDARD) count_proj = 0 training_seq = [] for depgraph in depgraphs: if not self._is_projective(depgraph): continue count_proj += 1 conf = Configuration(depgraph) while len(conf.buffer) > 0: b0 = conf.buffer[0] features = conf.extract_features() binary_features = self._convert_to_binary_features(features) if len(conf.stack) > 0: s0 = conf.stack[len(conf.stack) - 1] # Left-arc operation rel = self._get_dep_relation(b0, s0, depgraph) if rel is not None: key = Transition.LEFT_ARC + ':' + rel self._write_to_file(key, binary_features, input_file) operation.left_arc(conf, rel) training_seq.append(key) continue # Right-arc operation rel = self._get_dep_relation(s0, b0, depgraph) if rel is not None: precondition = True # Get the max-index of buffer maxID = conf._max_address for w in range(maxID + 1): if w != b0: relw = self._get_dep_relation(b0, w, depgraph) if relw is not None: if (b0, relw, w) not in conf.arcs: precondition = False if precondition: key = Transition.RIGHT_ARC + ':' + rel self._write_to_file( key, binary_features, input_file) operation.right_arc(conf, rel) training_seq.append(key) continue # Shift operation as the default key = Transition.SHIFT self._write_to_file(key, binary_features, input_file) operation.shift(conf) training_seq.append(key) print(" Number of training examples : " + str(len(depgraphs))) print(" Number of valid (projective) examples : " + str(count_proj)) return training_seq def _create_training_examples_arc_eager(self, depgraphs, input_file): """ Create the training example in the libsvm format and write it to the input_file. Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Joav Goldberg and Joakim Nivre """ operation = Transition(self.ARC_EAGER) countProj = 0 training_seq = [] for depgraph in depgraphs: if not self._is_projective(depgraph): continue countProj += 1 conf = Configuration(depgraph) while len(conf.buffer) > 0: b0 = conf.buffer[0] features = conf.extract_features() binary_features = self._convert_to_binary_features(features) if len(conf.stack) > 0: s0 = conf.stack[len(conf.stack) - 1] # Left-arc operation rel = self._get_dep_relation(b0, s0, depgraph) if rel is not None: key = Transition.LEFT_ARC + ':' + rel self._write_to_file(key, binary_features, input_file) operation.left_arc(conf, rel) training_seq.append(key) continue # Right-arc operation rel = self._get_dep_relation(s0, b0, depgraph) if rel is not None: key = Transition.RIGHT_ARC + ':' + rel self._write_to_file(key, binary_features, input_file) operation.right_arc(conf, rel) training_seq.append(key) continue # reduce operation flag = False for k in range(s0): if self._get_dep_relation(k, b0, depgraph) is not None: flag = True if self._get_dep_relation(b0, k, depgraph) is not None: flag = True if flag: key = Transition.REDUCE self._write_to_file(key, binary_features, input_file) operation.reduce(conf) training_seq.append(key) continue # Shift operation as the default key = Transition.SHIFT self._write_to_file(key, binary_features, input_file) operation.shift(conf) training_seq.append(key) print(" Number of training examples : " + str(len(depgraphs))) print(" Number of valid (projective) examples : " + str(countProj)) return training_seq def train(self, depgraphs, modelfile): """ :param depgraphs : list of DependencyGraph as the training data :type depgraphs : DependencyGraph :param modelfile : file name to save the trained model :type modelfile : str """ try: 
input_file = tempfile.NamedTemporaryFile( prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False) if self._algorithm == self.ARC_STANDARD: self._create_training_examples_arc_std(depgraphs, input_file) else: self._create_training_examples_arc_eager(depgraphs, input_file) input_file.close() # Using the temporary file to train the libsvm classifier x_train, y_train = load_svmlight_file(input_file.name) # The parameter is set according to the paper: # Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre # Todo : because of probability = True => very slow due to # cross-validation. Need to improve the speed here model = svm.SVC( kernel='poly', degree=2, coef0=0, gamma=0.2, C=0.5, verbose=True, probability=True) model.fit(x_train, y_train) # Save the model to file name (as pickle) pickle.dump(model, open(modelfile, 'wb')) finally: remove(input_file.name) def parse(self, depgraphs, modelFile): """ :param depgraphs: the list of test sentence, each sentence is represented as a dependency graph where the 'head' information is dummy :type depgraphs: list(DependencyGraph) :param modelfile: the model file :type modelfile: str :return: list (DependencyGraph) with the 'head' and 'rel' information """ result = [] # First load the model model = pickle.load(open(modelFile, 'rb')) operation = Transition(self._algorithm) for depgraph in depgraphs: conf = Configuration(depgraph) while len(conf.buffer) > 0: features = conf.extract_features() col = [] row = [] data = [] for feature in features: if feature in self._dictionary: col.append(self._dictionary[feature]) row.append(0) data.append(1.0) np_col = array(sorted(col)) # NB : index must be sorted np_row = array(row) np_data = array(data) x_test = sparse.csr_matrix((np_data, (np_row, np_col)), shape=(1, len(self._dictionary))) # It's best to use decision function as follow BUT it's not supported yet for sparse SVM # Using decision funcion to build the votes array #dec_func = model.decision_function(x_test)[0] #votes = {} #k = 0 # for i in range(len(model.classes_)): # for j in range(i+1, len(model.classes_)): # #if dec_func[k] > 0: # votes.setdefault(i,0) # votes[i] +=1 # else: # votes.setdefault(j,0) # votes[j] +=1 # k +=1 # Sort votes according to the values #sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True) # We will use predict_proba instead of decision_function prob_dict = {} pred_prob = model.predict_proba(x_test)[0] for i in range(len(pred_prob)): prob_dict[i] = pred_prob[i] sorted_Prob = sorted( prob_dict.items(), key=itemgetter(1), reverse=True) # Note that SHIFT is always a valid operation for (y_pred_idx, confidence) in sorted_Prob: #y_pred = model.predict(x_test)[0] # From the prediction match to the operation y_pred = model.classes_[y_pred_idx] if y_pred in self._match_transition: strTransition = self._match_transition[y_pred] baseTransition = strTransition.split(":")[0] if baseTransition == Transition.LEFT_ARC: if operation.left_arc(conf, strTransition.split(":")[1]) != -1: break elif baseTransition == Transition.RIGHT_ARC: if operation.right_arc(conf, strTransition.split(":")[1]) != -1: break elif baseTransition == Transition.REDUCE: if operation.reduce(conf) != -1: break elif baseTransition == Transition.SHIFT: if operation.shift(conf) != -1: break else: raise ValueError("The predicted transition is not recognized, expected errors") # Finish with operations build the dependency graph from Conf.arcs new_depgraph = deepcopy(depgraph) for key in new_depgraph.nodes: node = 
new_depgraph.nodes[key] node['rel'] = '' # With the default, all the token depend on the Root node['head'] = 0 for (head, rel, child) in conf.arcs: c_node = new_depgraph.nodes[child] c_node['head'] = head c_node['rel'] = rel result.append(new_depgraph) return result def demo(): """ >>> from nltk.parse import DependencyGraph, DependencyEvaluator >>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition >>> gold_sent = DependencyGraph(\""" ... Economic JJ 2 ATT ... news NN 3 SBJ ... has VBD 0 ROOT ... little JJ 5 ATT ... effect NN 3 OBJ ... on IN 5 ATT ... financial JJ 8 ATT ... markets NNS 6 PC ... . . 3 PU ... \""") >>> conf = Configuration(gold_sent) ###################### Check the Initial Feature ######################## >>> print(', '.join(conf.extract_features())) STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ ###################### Check The Transition ####################### Check the Initialized Configuration >>> print(conf) Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : [] A. Do some transition checks for ARC-STANDARD >>> operation = Transition('arc-standard') >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") >>> operation.shift(conf) >>> operation.left_arc(conf,"SBJ") >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.shift(conf) >>> operation.left_arc(conf, "ATT") Middle Configuration and Features Check >>> print(conf) Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)] >>> print(', '.join(conf.extract_features())) STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT >>> operation.right_arc(conf, "PC") >>> operation.right_arc(conf, "ATT") >>> operation.right_arc(conf, "OBJ") >>> operation.shift(conf) >>> operation.right_arc(conf, "PU") >>> operation.right_arc(conf, "ROOT") >>> operation.shift(conf) Terminated Configuration Check >>> print(conf) Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)] B. Do some transition checks for ARC-EAGER >>> conf = Configuration(gold_sent) >>> operation = Transition('arc-eager') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.shift(conf) >>> operation.left_arc(conf,'SBJ') >>> operation.right_arc(conf,'ROOT') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.right_arc(conf,'OBJ') >>> operation.right_arc(conf,'ATT') >>> operation.shift(conf) >>> operation.left_arc(conf,'ATT') >>> operation.right_arc(conf,'PC') >>> operation.reduce(conf) >>> operation.reduce(conf) >>> operation.reduce(conf) >>> operation.right_arc(conf,'PU') >>> print(conf) Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)] ###################### Check The Training Function ####################### A. 
Check the ARC-STANDARD training >>> import tempfile >>> import os >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False) >>> parser_std = TransitionParser('arc-standard') >>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file))) Number of training examples : 1 Number of valid (projective) examples : 1 SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT >>> parser_std.train([gold_sent],'temp.arcstd.model') Number of training examples : 1 Number of valid (projective) examples : 1 ... >>> remove(input_file.name) B. Check the ARC-EAGER training >>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False) >>> parser_eager = TransitionParser('arc-eager') >>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file))) Number of training examples : 1 Number of valid (projective) examples : 1 SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU >>> parser_eager.train([gold_sent],'temp.arceager.model') Number of training examples : 1 Number of valid (projective) examples : 1 ... >>> remove(input_file.name) ###################### Check The Parsing Function ######################## A. Check the ARC-STANDARD parser >>> result = parser_std.parse([gold_sent], 'temp.arcstd.model') >>> de = DependencyEvaluator(result, [gold_sent]) >>> de.eval() >= (0, 0) True B. Check the ARC-EAGER parser >>> result = parser_eager.parse([gold_sent], 'temp.arceager.model') >>> de = DependencyEvaluator(result, [gold_sent]) >>> de.eval() >= (0, 0) True Note that result is very poor because of only one training example. """ if __name__ == '__main__': import doctest doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
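For readers who want to see the arc-standard bookkeeping in isolation, here is a simplified sketch of the stack/buffer/arc-set updates on a toy sentence "0:ROOT 1:She 2:eats 3:fish". It mirrors the core of Transition.shift, left_arc and right_arc above, but it is not the NLTK implementation and it skips all precondition checks.

# Simplified arc-standard bookkeeping on plain lists (illustration only).
stack, buffer_, arcs = [0], [1, 2, 3], []

def shift():
    stack.append(buffer_.pop(0))

def left_arc(rel):
    # head = front of buffer, dependent = top of stack
    arcs.append((buffer_[0], rel, stack.pop()))

def right_arc(rel):
    # head = top of stack, dependent = front of buffer; the head replaces
    # the buffer front afterwards, as in Transition.right_arc (arc-standard)
    head = stack.pop()
    dependent = buffer_[0]
    buffer_[0] = head
    arcs.append((head, rel, dependent))

shift()            # stack=[0, 1], buffer=[2, 3]
left_arc('SBJ')    # eats <- She,  stack=[0],    buffer=[2, 3]
shift()            # stack=[0, 2], buffer=[3]
right_arc('OBJ')   # eats -> fish, stack=[0],    buffer=[2]
right_arc('ROOT')  # ROOT -> eats, stack=[],     buffer=[0]
shift()            # stack=[0],    buffer=[]
print(arcs)        # [(2, 'SBJ', 1), (2, 'OBJ', 3), (0, 'ROOT', 2)]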
mit
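The libsvm-style training lines produced by _convert_to_binary_features and _write_to_file can also be sketched on their own. The feature strings and transition keys below are arbitrary examples; the encoding (an integer label followed by sorted 'featureID:1.0' pairs) follows the code above.

# Sketch of the 'label featureID:1.0 ...' lines written to the training file.
dictionary = {}    # feature string -> integer feature id
transitions = {}   # transition key -> integer class label

def encode(key, features):
    transitions.setdefault(key, len(transitions) + 1)
    ids = []
    for feat in features:
        dictionary.setdefault(feat, len(dictionary))
        ids.append(dictionary[feat])
    body = ' '.join('%d:1.0' % i for i in sorted(ids))
    return '%d %s' % (transitions[key], body)

print(encode('SHIFT', ['STK_0_POS_TOP', 'BUF_0_FORM_Economic']))
# -> '1 0:1.0 1:1.0'
print(encode('LEFTARC:ATT', ['STK_0_POS_JJ', 'BUF_0_FORM_news']))
# -> '2 2:1.0 3:1.0'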
jeffery-do/Vizdoombot
doom/lib/python3.5/site-packages/skimage/viewer/tests/test_viewer.py
35
2165
from skimage import data
from skimage.viewer.qt import QtGui, QtCore, has_qt
from skimage.viewer import ImageViewer, CollectionViewer
from skimage.viewer.plugins import OverlayPlugin
from skimage.transform import pyramid_gaussian
from skimage.filters import sobel
from numpy.testing import assert_equal
from numpy.testing.decorators import skipif
from skimage._shared.version_requirements import is_installed
from skimage._shared._warnings import expected_warnings


@skipif(not has_qt)
def test_viewer():
    astro = data.astronaut()
    coins = data.coins()

    view = ImageViewer(astro)
    import tempfile
    _, filename = tempfile.mkstemp(suffix='.png')

    view.show(False)
    view.close()
    view.save_to_file(filename)
    view.open_file(filename)
    assert_equal(view.image, astro)
    view.image = coins
    assert_equal(view.image, coins)
    view.save_to_file(filename)
    view.open_file(filename)
    view.reset_image()
    assert_equal(view.image, coins)


def make_key_event(key):
    return QtGui.QKeyEvent(QtCore.QEvent.KeyPress, key,
                           QtCore.Qt.NoModifier)


@skipif(not has_qt)
def test_collection_viewer():

    img = data.astronaut()
    img_collection = tuple(pyramid_gaussian(img))

    view = CollectionViewer(img_collection)
    make_key_event(48)

    view.update_index('', 2)
    assert_equal(view.image, img_collection[2])

    view.keyPressEvent(make_key_event(53))
    assert_equal(view.image, img_collection[5])

    view._format_coord(10, 10)


@skipif(not has_qt)
@skipif(not is_installed('matplotlib', '>=1.2'))
def test_viewer_with_overlay():
    img = data.coins()
    ov = OverlayPlugin(image_filter=sobel)
    viewer = ImageViewer(img)
    viewer += ov

    import tempfile
    _, filename = tempfile.mkstemp(suffix='.png')

    ov.color = 3
    assert_equal(ov.color, 'yellow')

    with expected_warnings(['precision loss']):
        viewer.save_to_file(filename)

    ov.display_filtered_image(img)
    assert_equal(ov.overlay, img)

    ov.overlay = None
    assert_equal(ov.overlay, None)

    ov.overlay = img
    assert_equal(ov.overlay, img)
    assert_equal(ov.filtered_image, img)
mit
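A brief, hedged aside on the image pyramid used by test_collection_viewer above: pyramid_gaussian yields progressively smoothed and downsampled copies of the input, and CollectionViewer simply steps through that tuple. Depending on the scikit-image version, the color axis may or may not be treated as a spatial axis, so the exact shapes printed below can differ.

from skimage import data
from skimage.transform import pyramid_gaussian

# Each pyramid level is a smoothed, downscaled copy of the previous one.
img = data.astronaut()
pyramid = tuple(pyramid_gaussian(img))
for level, im in enumerate(pyramid[:4]):
    print(level, im.shape)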
pradyu1993/scikit-learn
examples/decomposition/plot_image_denoising.py
4
5587
""" ========================================= Image denoising using dictionary learning ========================================= An example comparing the effect of reconstructing noisy fragments of Lena using online :ref:`DictionaryLearning` and various transform methods. The dictionary is fitted on the non-distorted left half of the image, and subsequently used to reconstruct the right half. A common practice for evaluating the results of image denoising is by looking at the difference between the reconstruction and the original image. If the reconstruction is perfect this will look like gaussian noise. It can be seen from the plots that the results of :ref:`omp` with two non-zero coefficients is a bit less biased than when keeping only one (the edges look less prominent). It is in addition closer from the ground truth in Frobenius norm. The result of :ref:`least_angle_regression` is much more strongly biased: the difference is reminiscent of the local intensity value of the original image. Thresholding is clearly not useful for denoising, but it is here to show that it can produce a suggestive output with very high speed, and thus be useful for other tasks such as object classification, where performance is not necessarily related to visualisation. """ print __doc__ from time import time import pylab as pl import numpy as np from scipy.misc import lena from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.feature_extraction.image import extract_patches_2d from sklearn.feature_extraction.image import reconstruct_from_patches_2d ############################################################################### # Load Lena image and extract patches lena = lena() / 256.0 # downsample for higher speed lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2] lena /= 4.0 height, width = lena.shape # Distort the right half of the image print 'Distorting image...' distorted = lena.copy() distorted[:, height / 2:] += 0.075 * np.random.randn(width, height / 2) # Extract all clean patches from the left half of the image print 'Extracting clean patches...' t0 = time() patch_size = (7, 7) data = extract_patches_2d(distorted[:, :height / 2], patch_size) data = data.reshape(data.shape[0], -1) data -= np.mean(data, axis=0) data /= np.std(data, axis=0) print 'done in %.2fs.' % (time() - t0) ############################################################################### # Learn the dictionary from clean patches print 'Learning the dictionary... ' t0 = time() dico = MiniBatchDictionaryLearning(n_atoms=100, alpha=1, n_iter=500) V = dico.fit(data).components_ dt = time() - t0 print 'done in %.2fs.' 
% dt

pl.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
    pl.subplot(10, 10, i + 1)
    pl.imshow(comp.reshape(patch_size), cmap=pl.cm.gray_r,
              interpolation='nearest')
    pl.xticks(())
    pl.yticks(())
pl.suptitle('Dictionary learned from Lena patches\n' +
            'Train time %.1fs on %d patches' % (dt, len(data)),
            fontsize=16)
pl.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)


###############################################################################
# Display the distorted image

def show_with_diff(image, reference, title):
    """Helper function to display denoising"""
    pl.figure(figsize=(5, 3.3))
    pl.subplot(1, 2, 1)
    pl.title('Image')
    pl.imshow(image, vmin=0, vmax=1, cmap=pl.cm.gray, interpolation='nearest')
    pl.xticks(())
    pl.yticks(())
    pl.subplot(1, 2, 2)
    difference = image - reference

    pl.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
    pl.imshow(difference, vmin=-0.5, vmax=0.5, cmap=pl.cm.PuOr,
              interpolation='nearest')
    pl.xticks(())
    pl.yticks(())
    pl.suptitle(title, size=16)
    pl.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)

show_with_diff(distorted, lena, 'Distorted image')

###############################################################################
# Extract noisy patches and reconstruct them using the dictionary

print 'Extracting noisy patches... '
t0 = time()
data = extract_patches_2d(distorted[:, height / 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print 'done in %.2fs.' % (time() - t0)

transform_algorithms = [
    ('Orthogonal Matching Pursuit\n1 atom', 'omp',
     {'transform_n_nonzero_coefs': 1}),
    ('Orthogonal Matching Pursuit\n2 atoms', 'omp',
     {'transform_n_nonzero_coefs': 2}),
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]

reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print title, '... '
    reconstructions[title] = lena.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    code = dico.transform(data)
    patches = np.dot(code, V)
    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title][:, height / 2:] = reconstruct_from_patches_2d(
        patches, (width, height / 2))
    dt = time() - t0
    print 'done in %.2fs.' % dt
    show_with_diff(reconstructions[title], lena,
                   title + ' (time: %.1fs)' % dt)

pl.show()
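The fourfold downsampling applied to Lena near the top of this example is plain 2x2 block averaging. The tiny array below, chosen only so the numbers are easy to check by hand, shows the same slicing trick.

import numpy as np

a = np.arange(16, dtype=float).reshape(4, 4)
# average each 2x2 block, exactly as done for the Lena image above
down = (a[::2, ::2] + a[1::2, ::2] + a[::2, 1::2] + a[1::2, 1::2]) / 4.0
print(down)
# [[  2.5   4.5]
#  [ 10.5  12.5]]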
bsd-3-clause
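Separately from the dictionary-learning step, the patch extraction and reconstruction used above can be sanity-checked in isolation: averaging unmodified overlapping patches back together recovers the input image. The random 32x32 image below is a stand-in chosen for illustration.

import numpy as np
from sklearn.feature_extraction.image import (extract_patches_2d,
                                              reconstruct_from_patches_2d)

rng = np.random.RandomState(0)
image = rng.rand(32, 32)      # synthetic stand-in image
patch_size = (7, 7)

patches = extract_patches_2d(image, patch_size)
print(patches.shape)          # (676, 7, 7): every overlapping 7x7 patch

# Overlapping patches are averaged back into place; with unmodified patches
# this reproduces the original image up to floating point error.
rebuilt = reconstruct_from_patches_2d(patches, image.shape)
print(np.abs(rebuilt - image).max())   # a tiny number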
boland1992/seissuite_iran
seissuite/ant/pstomo.py
6
66086
""" Definition of classes handling dispersion curves and velocity maps (obtained by inverting dispersion curves) """ from seissuite.ant import pserrors, psutils import itertools as it import numpy as np from scipy.optimize import curve_fit from scipy.interpolate import interp1d import os import glob import pickle import matplotlib.pyplot as plt from matplotlib.colors import LinearSegmentedColormap from matplotlib import gridspec from matplotlib.colors import ColorConverter import shutil from inspect import getargspec # todo: discard measurments if too different from trimester velocities (see BB15B-SPB) # import CONFIG class initalised in ./configs/tmp_config.pickle config_pickle = 'configs/tmp_config.pickle' f = open(name=config_pickle, mode='rb') CONFIG = pickle.load(f) f.close() # import variables from initialised CONFIG class. MSEED_DIR = CONFIG.MSEED_DIR SIGNAL_WINDOW_VMIN = CONFIG.SIGNAL_WINDOW_VMIN SIGNAL_WINDOW_VMAX = CONFIG.SIGNAL_WINDOW_VMAX SIGNAL2NOISE_TRAIL = CONFIG.SIGNAL2NOISE_TRAIL NOISE_WINDOW_SIZE = CONFIG.NOISE_WINDOW_SIZE MINSPECTSNR = CONFIG.MINSPECTSNR MINSPECTSNR_NOSDEV = CONFIG.MINSPECTSNR_NOSDEV MAXSDEV = CONFIG.MAXSDEV MINNBTRIMESTER = CONFIG.MINNBTRIMESTER MAXPERIOD_FACTOR = CONFIG.MAXPERIOD_FACTOR LONSTEP = CONFIG.LONSTEP LATSTEP = CONFIG.LATSTEP CORRELATION_LENGTH = CONFIG.CORRELATION_LENGTH ALPHA = CONFIG.ALPHA BETA = CONFIG.BETA LAMBDA = CONFIG.LAMBDA FTAN_ALPHA = CONFIG.FTAN_ALPHA FTAN_VELOCITIES_STEP = CONFIG.FTAN_VELOCITIES_STEP PERIOD_RESAMPLE = CONFIG.PERIOD_RESAMPLE RAWFTAN_PERIODS = CONFIG.RAWFTAN_PERIODS # this is the bounding box for the stations that will be processed for tomo map global filter_box filter_box = False #import tomographic map bounding box BBOX_LARGE = CONFIG.BBOX_LARGE # ======================== # Constants and parameters # ======================== EPS = 1.0E-6 # custom color map for seismic anomalies # -------------------------------------- c = ColorConverter() colors = ['black', 'red', 'gold', 'white', 'white', 'aquamarine', 'blue', 'magenta'] values = [-1.0, -0.35, -0.1, -0.025, 0.025, 0.1, 0.35, 1.0] #colors = ['black', 'red', 'gold', 'lemonchiffon', 'white', # 'palegreen', 'aquamarine', 'blue', 'magenta'] #values = [-1.0, -0.7, -0.3, -0.1, 0.0, # 0.1, 0.3, 0.7, 1.0] rgblist = [c.to_rgb(s) for s in colors] reds, greens, blues = zip(*rgblist) cdict = {} for x, r, g, b in zip(values, reds, greens, blues): v = (x - min(values)) / (max(values) - min(values)) cdict.setdefault('red', []).append((v, r, r)) cdict.setdefault('green', []).append((v, g, g)) cdict.setdefault('blue', []).append((v, b, b)) CMAP_SEISMIC = LinearSegmentedColormap('customseismic', cdict) # custom color map for spatial resolution # --------------------------------------- colors = ['black', 'red', 'yellow', 'green', 'white'] values = [0, 0.25, 0.5, 0.75, 1.0] #colors = ['magenta', 'blue', 'aquamarine', 'palegreen', 'white', # 'lemonchiffon', 'gold', 'red', 'darkred'] #values = [-1.0, -0.7, -0.3, -0.1, # 0.1, 0.3, 0.7, 1.0] rgblist = [c.to_rgb(s) for s in colors] reds, greens, blues = zip(*rgblist) cdict = {} for x, r, g, b in zip(values, reds, greens, blues): v = (x - min(values)) / (max(values) - min(values)) cdict.setdefault('red', []).append((v, r, r)) cdict.setdefault('green', []).append((v, g, g)) cdict.setdefault('blue', []).append((v, b, b)) CMAP_RESOLUTION = LinearSegmentedColormap('customresolution', cdict) CMAP_RESOLUTION.set_bad(color='0.85') # custom color map for path density # --------------------------------------- colors = ['white', 'cyan', 'green', 
'yellow', 'red', 'black'] values = [0, 0.05, 0.1, 0.25, 0.5, 1.0] rgblist = [c.to_rgb(s) for s in colors] reds, greens, blues = zip(*rgblist) cdict = {} for x, r, g, b in zip(values, reds, greens, blues): v = (x - min(values)) / (max(values) - min(values)) cdict.setdefault('red', []).append((v, r, r)) cdict.setdefault('green', []).append((v, g, g)) cdict.setdefault('blue', []).append((v, b, b)) CMAP_DENSITY = LinearSegmentedColormap('customdensity', cdict) class DispersionCurve: """ Class holding a dispersion curve, i.e., velocity as a function of period """ def __init__(self, periods, v, station1, station2, minspectSNR=MINSPECTSNR, minspectSNR_nosdev=MINSPECTSNR_NOSDEV, maxsdev=MAXSDEV, minnbtrimester=MINNBTRIMESTER, maxperiodfactor=MAXPERIOD_FACTOR, nom2inst_periods=None): """ Initiliazes the dispersion curve between the pair *station1*-*station2* using the given velocities (array *v*) at the given *periods*. Selection parameters (used to select velocities that will participate to the tomographic inversion) are given in *minspectSNR*, *minspectSNR_nosdev*, *maxsdev*, *minnbtrimester* and *maxperiodfactor*. Periods can be nominal (i.e., center of Gaussian filters of FTAN) or instantaneous (dphi/dt). If periods are instantaneous, then a list of tuples [(nominal period, instantaneous period), ...] should be provided in *nom2inst_periods* @type periods: iterable @type v: iterable @type station1: L{psstation.Station} @type station2: L{psstation.Station} """ # periods and associated velocities self.periods = np.array(periods) self.v = np.array(v) # SNRs along periods self._SNRs = None # trimester velocities and SNRs self.v_trimesters = {} self._SNRs_trimesters = {} # stations self.station1 = station1 self.station2 = station2 # selection parameters self.minspectSNR = minspectSNR self.minspectSNR_nosdev = minspectSNR_nosdev self.maxsdev = maxsdev self.minnbtrimester = minnbtrimester self.maxperiodfactor = maxperiodfactor # list of (nominal period, instantaneous period) self.nom2inst_periods = nom2inst_periods def __repr__(self): return 'Dispersion curve between stations {}-{}'.format(self.station1.name, self.station2.name) def get_period_index(self, period, verbose=True): """ Gets index of *period*, or raises an error if period is not found """ if verbose: print "period: ", period print "self.periods: ", self.periods iperiod = np.abs(self.periods - period).argmin() if np.abs(self.periods[iperiod] - period) > EPS: raise Exception('Cannot find period in dispersion curve') return iperiod def update_parameters(self, minspectSNR=None, minspectSNR_nosdev=None, maxsdev=None, minnbtrimester=None, maxperiodfactor=None): """ Updating one or more filtering parameter(s) """ if not minspectSNR is None: self.minspectSNR = minspectSNR if not minspectSNR_nosdev is None: self.minspectSNR_nosdev = minspectSNR_nosdev if not maxsdev is None: self.maxsdev = maxsdev if not minnbtrimester is None: self.minnbtrimester = minnbtrimester if not maxperiodfactor is None: self.maxperiodfactor = maxperiodfactor def dist(self): """ Interstation spacing (km) """ return self.station1.dist(self.station2) def add_trimester(self, trimester_start, curve_trimester): """ Adding a trimester dispersion curve. 
@type trimester_start: int @type curve_trimester: L{DispersionCurve} """ if trimester_start in self.v_trimesters: raise Exception('Trimester already added') if np.any(curve_trimester.periods != self.periods): raise Exception("Wrong periods for trimester curve") # adding velocity adn SNR arrays of trimester self.v_trimesters[trimester_start] = curve_trimester.v self._SNRs_trimesters[trimester_start] = curve_trimester._SNRs def add_SNRs(self, xc, filter_alpha=FTAN_ALPHA, months=None, vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX, signal2noise_trail=SIGNAL2NOISE_TRAIL, noise_window_size=NOISE_WINDOW_SIZE): """ Adding spectral SNRs at each period of the dispersion curve. The SNRs are calculated from the cross-correlation data bandpassed with narrow Gaussian filters (similar to the filter used in the FTAN) centered at self.periods, and width controlled by *filter_alpha*. (See psutils.bandpass_gaussian().) Parameters *vmin*, *vmax*, *signal2noise_trail*, *noise_window_size* control the location of the signal window and the noise window (see function xc.SNR()). @type xc: L{CrossCorrelation} """ try: centerperiods_and_alpha = zip(self.periods, [filter_alpha] * len(self.periods)) SNRs = xc.SNR(centerperiods_and_alpha=centerperiods_and_alpha, months=months, vmin=vmin, vmax=vmax, signal2noise_trail=signal2noise_trail, noise_window_size=noise_window_size) if self.nom2inst_periods: # if a list of (nominal period, inst period) is provided # we use it to re-interpolate SNRs inst_period_func = interp1d(*zip(*self.nom2inst_periods)) SNRs = np.interp(x=self.periods, xp=inst_period_func(self.periods), fp=SNRs, left=np.nan, right=np.nan) self._SNRs = SNRs except Exception as err: print "Something has gone wrong with the SNR calculations: {}"\ .format(err) self.SNRs = None def get_SNRs(self, **kwargs): if self._SNRs is None: self.add_SNRs(**kwargs) return self._SNRs def filtered_sdevs(self): """ Standard dev of velocity at each period, calculated across trimester velocity curves. On periods at which std dev cannot be calculated, NaNs are returned. Selection criteria: - SNR of trimester velocity >= minspectSNR - nb of trimester velocities >= minnbtrimester @rtype: L{numpy.ndarray} """ # list of arrays of trimester velocities trimester_vels = self.filtered_trimester_vels() sdevs = [] for v_across_trimesters in zip(*trimester_vels): # filtering out nans from trimester velocities v_across_trimesters = [v for v in v_across_trimesters if not np.isnan(v)] if len(v_across_trimesters) >= self.minnbtrimester: sdev = np.std(v_across_trimesters) else: # not enough trimester velocities to estimate std dev sdev = np.nan sdevs.append(sdev) return np.array(sdevs) if sdevs else np.ones_like(self.periods) * np.nan def filtered_vels_sdevs(self): """ Returns array of velocities and array of associated standard deviations. Velocities not passing selection criteria are replaced with NaNs. Where standard deviation cannot be estimated, NaNs are returned. 
Selection criteria: 1) period <= distance * *maxperiodfactor* 2) for velocities having a standard deviation associated: - standard deviation <= *maxsdev* - SNR >= *minspectSNR* 3) for velocities NOT having a standard deviation associated: - SNR >= *minspectSNR_nosdev* (SNRs equal to Nan are replaced with 0) @rtype: L{numpy.ndarray}, L{numpy.ndarray} """ #print "self._SNRs in filtered_vels_sdevs: ", self._SNRs if self._SNRs is None: raise Exception("Spectral SNRs not defined") # estimating std devs, WHERE POSSIBLE (returning NaNs where not possible) sdevs = self.filtered_sdevs() has_sdev = ~np.isnan(sdevs) # where are std devs defined? # Selection criteria: # 1) period <= distance * *maxperiodfactor* cutoffperiod = self.maxperiodfactor * self.dist() mask = self.periods <= cutoffperiod # 2) for velocities having a standard deviation associated: # - standard deviation <= *maxsdev* # - SNR >= *minspectSNR* mask[has_sdev] &= (sdevs[has_sdev] <= self.maxsdev) & \ (np.nan_to_num(self._SNRs[has_sdev]) >= self.minspectSNR) # 3) for velocities NOT having a standard deviation associated: # - SNR >= *minspectSNR_nosdev* mask[~has_sdev] &= \ np.nan_to_num(self._SNRs[~has_sdev]) >= self.minspectSNR_nosdev # replacing velocities not passing the selection criteria with NaNs #print np.where(mask, self.v, np.nan), sdevs return np.where(mask, self.v, np.nan), sdevs def filtered_vel_sdev_SNR(self, period): """ Returns a velocity, its std deviation and SNR at a given period, or nan if the velocity does not satisfy the criteria, or raises an exception if the period is not found. @type period: float @rtype: (float, float, float) """ iperiod = self.get_period_index(period) vels, sdevs = self.filtered_vels_sdevs() return vels[iperiod], sdevs[iperiod], self._SNRs[iperiod] def filtered_trimester_vels(self): """ Returns list of arrays of trimester velocities, or nan. 
Selection criteria: - SNR of trimester velocity defined and >= minspectSNR - period <= pair distance * *maxperiodfactor* @rtype: list of L{numpy.ndarray} """ # filtering criterion: periods <= distance * maxperiodfactor dist = self.station1.dist(self.station2) periodmask = self.periods <= self.maxperiodfactor * dist varrays = [] for trimester_start, vels in self.v_trimesters.items(): SNRs = self._SNRs_trimesters.get(trimester_start) if SNRs is None: raise Exception("Spectral SNRs not defined") # filtering criterion: SNR >= minspectSNR mask = periodmask & (np.nan_to_num(SNRs) >= self.minspectSNR) varrays.append(np.where(mask, vels, np.nan)) return varrays class Grid: """ Class holding a 2D regular rectangular spatial grid """ def __init__(self, xmin, xstep, nx, ymin, ystep, ny): """ Min coords, step size and nb of points of grid """ self.xmin = xmin self.xstep = xstep self.nx = int(nx) self.ymin = ymin self.ystep = ystep self.ny = int(ny) def __repr__(self): s = '<2D grid: x = {}...{} by {}, y = {}...{} by {}>' return s.format(self.xmin, self.get_xmax(), self.xstep, self.ymin, self.get_ymax(), self.ystep) def __eq__(self, other): """ @type other: Grid """ try: samegrids = (self.xmin == other.xmin and self.xstep == other.xstep and self.nx == other.nx and self.ymin == other.ymin and self.ystep == other.ystep and self.ny == other.ny) return samegrids except: return False def __ne__(self, other): return not self.__eq__(other) def get_xmax(self): return self.xmin + (self.nx - 1) * self.xstep def get_ymax(self): return self.ymin + (self.ny - 1) * self.ystep def bbox(self): """ Bounding box: (xmin, xmax, ymin, ymax) @rtype: (float, float, float, float) """ return self.xmin, self.get_xmax(), self.ymin, self.get_ymax() def n_nodes(self): """ Nb of nodes on grid """ return self.nx * self.ny def ix_iy(self, index_): """ Indexes along x and y-axis of node nb *index_* """ ix = np.int_(np.array(index_) / self.ny) iy = np.mod(np.array(index_), self.ny) return ix, iy def xy(self, index_): """ Coords of node nb *index_* """ index_ = np.array(index_) #if np.any((index_ < 0) | (index_ > self.n_nodes() - 1)): # raise Exception('Index out of bounds') ix, iy = self.ix_iy(index_) return self._x(ix), self._y(iy) def xy_nodes(self): """ Returns coords of all nodes of grid """ return self.xy(np.arange(0, self.n_nodes())) def xarray(self): return np.linspace(self.xmin, self.get_xmax(), num=self.nx, endpoint=True) def yarray(self): return np.linspace(self.ymin, self.get_ymax(), num=self.ny, endpoint=True) def index_(self, ix, iy): """ Index of node (ix, iy) in grid: - 0 : ix=0, iy=0 - 1 : ix=0, iy=1 - ... - ny: ix=1, iy=0 - ... 
- nx*ny-1: ix=nx-1, iy=ny-1 """ ix = np.array(ix) iy = np.array(iy) #if np.any((ix < 0) | (ix > self.nx - 1)): # raise Exception('ix out of bounds') #if np.any((iy < 0) | (iy > self.ny - 1)): # raise Exception('iy out of bounds') return ix * self.ny + iy def indexes_delaunay_triangle(self, x, y): """ Indexes of the grid's nodes defining the Delaunay triangle around point (x, y) """ # x and y indexes of bottom left neighbour ix = self._xindex_left_neighbour(x) iy = self._yindex_bottom_neighbour(y) np.where(ix == self.nx - 1, ix - 1, ix) np.where(iy == self.ny - 1, iy - 1, iy) xratio = (x - self._x(ix)) / self.xstep yratio = (y - self._y(iy)) / self.ystep # returning indexes of vertices of bottom right triangle # or upper left triangle depending on location index1 = self.index_(ix, iy) index2 = np.where(xratio >= yratio, self.index_(ix+1, iy), self.index_(ix, iy+1)) index3 = self.index_(ix+1, iy+1) return index1, index2, index3 def geodetic_dist(self, index1, index2): """ Geodetic distance between nodes nb *index1* and *index2*, whose coodinates (x, y) are treated as (lon, lat) """ lon1, lat2 = self.xy(index1) lon2, lat2 = self.xy(index2) return psutils.dist(lons1=lon1, lats1=lat2, lons2=lon2, lats2=lat2) def to_2D_array(self, a): """ Converts a sequence-like *a* to a 2D array b[ix, iy] such that i is the index of node (ix, iy) """ b = np.zeros((self.nx, self.ny)) ix, iy = self.ix_iy(range(self.n_nodes())) b[ix, iy] = np.array(a).flatten() return b def _x(self, ix): """ Returns the abscissa of node nb *ix* on x-axis (ix = 0 ... nx-1) """ ix = np.array(ix) if np.any((ix < 0) | (ix > self.nx - 1)): raise Exception('ix out of bounds') return self.xmin + ix * self.xstep def _y(self, iy): """ Returns the ordinate of node nb *iy* on y-axis """ iy = np.array(iy) if np.any((iy < 0) | (iy > self.ny - 1)): raise Exception('iy out of bounds') return self.ymin + iy * self.ystep def _xindex_left_neighbour(self, x): """ Returns the index (along x-axis) of the grid nodes closest to (and on the left of) *x* (Index of 1st node = 0, index of last node = nx - 1) @rtype: Number """ x = np.array(x) # checking bounds out_of_bounds = (x < self.xmin) | (x > self.get_xmax()) if np.any(out_of_bounds): s = 'some x {} are out of bounds [{} - {}]' raise Exception(s.format(x[out_of_bounds], self.xmin, self.get_xmax())) # index of closest left node return np.int_((x - self.xmin) / self.xstep) def _yindex_bottom_neighbour(self, y): """ Same as above method, along y axis @rtype: Number """ y = np.array(y) # checking bounds out_of_bounds = (y < self.ymin) | (y > self.get_ymax()) if np.any(out_of_bounds): s = 'some y {} are out of bounds [{} - {}]' raise Exception(s.format(y[out_of_bounds], self.ymin, self.get_ymax())) # index of closest bottom node return np.int_((y - self.ymin) / self.ystep) class VelocityMap: """ Class taking care of the inversion of velocities between pairs of stations, to produce a velocity map at a given period. The inversion procedure of Barmin et al. (2001) is applied. 
Attributes: - period : period (s) of the velocity map - disp_curves : disp curves whose period's velocity is not nan - paths : list of geodesic paths associated with pairs of stations of dispersion curves - v0 : reference velocity (inverse of mean slowness, i.e., slowness implied by all observed travel-times) - dobs : vector of observed data (differences observed-reference travel time) - Cinv : inverse of covariance matrix of the data - G : forward matrix, such that d = G.m (m = parameter vector = (v0-v)/v at grid nodes) - density : array of path densities at grid nodes - Q : regularization matrix - Ginv : inversion operator, (Gt.C^-1.G + Q)^-1.Gt - mopt : vector of best-fitting parameters, Ginv.C^-1.dobs = best-fitting (v0-v)/v at grid nodes - R : resolution matrix, (Gt.C^-1.G + Q)^-1.Gt.C^-1.G = Ginv.C^-1.G - Rradius : array of radii of the cones that best-fit each line of the resolution matrix Note that vectors (d, m) and matrixes (Cinv, G, Q, Ginv, R) are NOT numpy arrays, but numpy matrixes (vectors being n x 1 matrixes). This means that the product operation (*) on such objects is NOT the element-by-element product, but the real matrix product. """ def __init__(self, dispersion_curves, period, skippairs=(), resolution_fit='cone', min_resolution_height=0.1, showplot=False, verbose=True, **kwargs): """ Initializes the velocity map at period = *period*, from the observed velocities in *dispersion_curves*: - sets up the data vector, forward matrix and regularization matrix - performs the tomographic inversion to estimate the best-fitting parameters and the resolution matrix - estimates the characteristic spatial resolution by fitting a cone to each line of the resolution matrix Specify pairs to be skipped (if any), as a list of pairs of stations names, e.g., skippairs = [('APOB', 'SPB'), ('ITAB', 'BAMB')]. This option is useful to perform a 2-pass tomographic inversion, wherein pairs with a too large difference observed/predicted travel- time are excluded from the second pass. Select the type of function you want to fit to each resolution map with *resolution_fit*: - 'cone' to fit a cone, and report the cone's radius as characteristic resolution at each grid node in self.Rradius - 'gaussian' to fit a gaussian function, exp(-r/2.sigma^2), and report 2.sigma as characteristic resolution at each grid node in self.Rradius Note that all resolutions in self.Rradius having a best-fitting cone height < *min_resolution_height* * max height will be discarded and set to nan. 
Append optional argument (**kwargs) to override default values: - minspectSNR : min spectral SNR to retain velocity (default MINSPECTSNR) - minspectSNR_nosdev: min spectral SNR to retain velocities without standard deviation (default MINSPECTSNR_NOSDEV) - minnbtrimester : min nb of trimester velocities to estimate standard deviation of velocity - maxsdev : max standard deviation to retain velocity (default MAXSDEV) - lonstep : longitude step of grid (default LONSTEP) - latstep : latitude step of grid (default LATSTEP) - correlation_length: correlation length of the smoothing kernel: S(r,r') = exp[-|r-r'|**2 / (2 * correlation_length**2)] (default value CORRELATION_LENGTH) - alpha : strength of the spatial smoothing term in the penalty function (default ALPHA) - beta : strength of the weighted norm penalization term in the penalty function (default BETA) - lambda_ : parameter in the damping factor of the norm penalization term, such that the norm is weighted by: exp(- lambda_*path_density) With a value of 0.15, penalization becomes strong when path density < ~20 With a value of 0.30, penalization becomes strong when path density < ~10 (default LAMBDA) @type dispersion_curves: list of L{DispersionCurve} @type skippairs: list of (str, str) """ self.period = period # reading inversion parameters minspectSNR = kwargs.get('minspectSNR', MINSPECTSNR) minspectSNR_nosdev = kwargs.get('minspectSNR_nosdev', MINSPECTSNR_NOSDEV) minnbtrimester = kwargs.get('minnbtrimester', MINNBTRIMESTER) maxsdev = kwargs.get('maxsdev', MAXSDEV) lonstep = kwargs.get('lonstep', LONSTEP) latstep = kwargs.get('latstep', LATSTEP) correlation_length = kwargs.get('correlation_length', CORRELATION_LENGTH) alpha = kwargs.get('alpha', ALPHA) beta = kwargs.get('beta', BETA) lambda_ = kwargs.get('lambda_', LAMBDA) # reading inversion parameters if verbose: print "Velocities selection criteria:" print "- rejecting velocities if SNR < {}".format(minspectSNR) s = "- rejecting velocities without std dev if SNR < {}" print s.format(minspectSNR_nosdev) s = "- estimating standard dev of velocities with more than {} trimesters" print s.format(minnbtrimester) print "- rejecting velocities with standard dev > {} km/s".format(maxsdev) print "\nTomographic inversion parameters:" print "- {} x {} deg grid".format(lonstep, latstep) s = "- correlation length of the smoothing kernel: {} km" print s.format(correlation_length) print "- strength of the spatial smoothing term: {}".format(alpha) print "- strength of the norm penalization term: {}".format(beta) print "- weighting norm by exp(- {} * path_density)".format(lambda_) print # skipping pairs if skippairs: skippairs = [set(pair) for pair in skippairs] dispersion_curves = [c for c in dispersion_curves if not {c.station1.name, c.station2.name} in skippairs] # updating parameters of dispersion curves for c in dispersion_curves: c.update_parameters(minspectSNR=minspectSNR, minspectSNR_nosdev=minspectSNR_nosdev, minnbtrimester=minnbtrimester, maxsdev=maxsdev) # valid dispersion curves (velocity != nan at period) and # associated interstation distances self.disp_curves = [c for c in dispersion_curves if not np.isnan( c.filtered_vel_sdev_SNR(self.period)[0])] if filter_box: if verbose: print "Number of dispersion curves before bounding box filter:\ ", len(self.disp_curves) filter_disp_curves = [] lonmin, lonmax, latmin, latmax = BBOX_LARGE print "\nBoundary box coordinates: ", lonmin, lonmax, latmin, latmax for c in self.disp_curves: lon1, lat1 = c.station1.coord lon2, lat2 = c.station2.coord # 
check that both stations are in the bounding box! if lonmin <= lon1 <= lonmax and lonmin <= lon2 <= lonmax and\ latmin <= lat1 <= latmax and latmin <= lat2 <= latmax: filter_disp_curves.append(c) self.disp_curves = filter_disp_curves if verbose: print "Number of dispersion curves after bounding box filter:\ ", len(self.disp_curves) if not self.disp_curves: s = "No valid velocity at selected period ({} sec)" raise pserrors.CannotPerformTomoInversion(s.format(period)) dists = np.array([c.dist() for c in self.disp_curves]) # getting (non nan) velocities and std devs at period vels, sigmav, _ = zip(*[c.filtered_vel_sdev_SNR(self.period) for c in self.disp_curves]) vels = np.array(vels) sigmav = np.array(sigmav) sigmav_isnan = np.isnan(sigmav) if np.all(sigmav_isnan): s = "No valid std deviation at selected period ({} sec)" print "Setting std to maximum by taking smallest order of magnitude." sigmav = np.array([i/100. for i in vels]) sigmav_isnan = np.isnan(sigmav) #raise pserrors.CannotPerformTomoInversion(s.format(period)) print "vels: ", vels print "sigmav: ", sigmav print "sigmav_isnan: ", sigmav_isnan # If the resolution in the velocities space is dv, # it means that a velocity v is actually anything between # v-dv/2 and v+dv/2, so the standard deviation cannot be # less than the standard dev of a uniform distribution of # width dv, which is dv / sqrt(12). Note that: # # dv = max(dv_FTAN, dt_xc * v^2/dist), # # with dv_FTAN the intrinsic velocity discretization step # of the FTAN, and dt_xc the sampling interval of the # cross-correlation. dv = np.maximum(FTAN_VELOCITIES_STEP, PERIOD_RESAMPLE * vels**2 / dists) minsigmav = dv / np.sqrt(12) sigmav[~sigmav_isnan] = np.maximum(sigmav[~sigmav_isnan], minsigmav[~sigmav_isnan]) # where std dev cannot be estimated (std dev = nan), # assigning 3 times the mean std dev of the period # following Bensen et al. 
(2008) sigmav[sigmav_isnan] = 3 * sigmav[~sigmav_isnan].mean() # ====================================================== # setting up reference velocity and data vector # = vector of differences observed-reference travel time # ====================================================== if verbose: print 'Setting up reference velocity (v0) and data vector (dobs)' # reference velocity = inverse of mean slowness # mean slowness = slowness implied by observed travel-times # = sum(observed travel-times) / sum(intersation distances) s = (dists / vels).sum() / dists.sum() self.v0 = 1.0 / s # data vector self.dobs = np.matrix(dists / vels - dists / self.v0).T # inverse of covariance matrix of the data if verbose: print 'Setting up covariance matrix (C)' sigmad = sigmav * dists / vels**2 self.Cinv = np.matrix(np.zeros((len(sigmav), len(sigmav)))) np.fill_diagonal(self.Cinv, 1.0 / sigmad**2) # REMOVE HARD WIRES WHEN POSSIBLE lonstep = 0.1 latstep = 0.1 # spatial grid for tomographic inversion (slightly enlarged to be # sure that no path will fall outside) lons1, lats1 = zip(*[c.station1.coord for c in self.disp_curves]) lons2, lats2 = zip(*[c.station2.coord for c in self.disp_curves]) tol = 1.0 lonmin = np.floor(min(lons1 + lons2) - tol) nlon = np.ceil((max(lons1 + lons2) + tol - lonmin) / lonstep) + 1 latmin = np.floor(min(lats1 + lats2) - tol) nlat = np.ceil((max(lats1 + lats2) + tol - latmin) / latstep) + 1 self.grid = Grid(lonmin, lonstep, nlon, latmin, latstep, nlat) #if verbose: #boundary_coords = ((latmin, lonmin), (lonmin + lonstep * nlon, # latmin + latstep * nlat)) #print "((lonmin, latmin), (lonmax, latmax)): ", boundary_coords # geodesic paths associated with pairs of stations of dispersion curves if verbose: print 'Calculating interstation paths' self.paths = [] for curve, dist in zip(self.disp_curves, dists): # interpoint distance <= 1 km, and nb of points >= 100 npts = max(np.ceil(dist) + 1, 100) path = psutils.geodesic(curve.station1.coord, curve.station2.coord, npts) self.paths.append(path) # ================================================ # setting up forward matrix G, such that d = G.m # # G[i,j] = integral{w_j(r) / v0 ds} over path nb i # (w_j(r) = weight of node nb j on location r) # ================================================ G = np.zeros((len(self.paths), self.grid.n_nodes())) if verbose: print 'Setting up {} x {} forward matrix (G)'.format(*G.shape) for ipath, path in enumerate(self.paths): # for each point M along the path (1) we determine the Delaunay # triangle ABC that encloses M, (2) we locally define a cartesian # system on the plane ABC, (3) we locate M' (the projection of M # on the plane ABC) and (4) we attribute weights to A, B, C # corresponding to the three-point linear interpolation of A, B, # C at point M'. 
lon_M, lat_M = path[:, 0], path[:, 1] xyzM = psutils.geo2cartesian(lon_M, lat_M) # indexes, geographic coordinates and cartesian coordinates # (on unit sphere) of grid nodes of Delaunay triangle ABC # enclosing M iA, iB, iC = self.grid.indexes_delaunay_triangle(lon_M, lat_M) lonlatA, lonlatB, lonlatC = [self.grid.xy(index_) for index_ in (iA, iB, iC)] xyzA, xyzB, xyzC = [psutils.geo2cartesian(lon, lat) for lon, lat in (lonlatA, lonlatB, lonlatC)] # projection of M on the plane ABC xyzMp = psutils.projection(xyzM, xyzA, xyzB, xyzC) # weights of nodes A, B, C in linear interpolation = # barycentric coordinates of M' in triangle ABC wA, wB, wC = psutils.barycentric_coords(xyzMp, xyzA, xyzB, xyzC) # attributing weights to grid nodes along path: # w[j, :] = w_j(r) = weights of node j along path #try and write in a condition here that eliminates issues nM = path.shape[0] w = np.zeros((self.grid.n_nodes(), nM)) w[iA, range(nM)] = wA w[iB, range(nM)] = wB w[iC, range(nM)] = wC # ds = array of infinitesimal distances along path ds = psutils.dist(lons1=lon_M[:-1], lats1=lat_M[:-1], lons2=lon_M[1:], lats2=lat_M[1:]) # integrating w_j(r) / v0 along path using trapeze formula G[ipath, :] = np.sum(0.5 * (w[:, :-1] + w[:, 1:]) / self.v0 * ds, axis=-1) self.G = np.matrix(G) # path densities around grid's nodes if verbose: print "Calculating path densities" self.density = self.path_density() # ===================================================================== # setting up regularization matrix Q = Ft.F + Ht.H # # F[i,j] = alpha * | 1 if i = j # | -S(ri,rj) / sum{S(ri,rj')} over j' != i] if i!= j # # H[i,j] = beta * | exp[-lambda * path_density(ri)] if i = j # | 0 if i!= j # # with S(.,.) the smoothing kernel and ri the locations grid nodes # ===================================================================== # setting up distance matrix: # dists[i,j] = distance between nodes nb i and j dists = np.zeros((self.grid.n_nodes(), self.grid.n_nodes())) if verbose: print "Setting up {} x {} regularization matrix (Q)".format(*dists.shape) # indices of the upper right triangle of distance matrix # = (array of index #1, array of index #2) i_upper, j_upper = np.triu_indices_from(dists) lons_i, lats_i = self.grid.xy(i_upper) lons_j, lats_j = self.grid.xy(j_upper) # distance matrix (upper triangle) dists[i_upper, j_upper] = psutils.dist(lons1=lons_i, lats1=lats_i, lons2=lons_j, lats2=lats_j) # symmetrizing distance matrix (works because diagonal elts = 0) dists += dists.T # setting up smoothing kernel: # S[i,j] = K * exp[-|ri-rj|**2 / (2 * CORRELATION_LENGTH**2)] S = np.exp(- dists**2 / (2 * correlation_length**2)) S /= S.sum(axis=-1) - np.diag(S) # normalization of non-diagonal terms # setting up spatial regularization matrix F F = np.matrix(-S) F[np.diag_indices_from(F)] = 1 F *= alpha # setting up regularization matrix Q # ... Ft.F part Q = F.T * F # ... 
Ht.H part for i, path_density in enumerate(self.density): Q[i, i] += beta**2 * np.exp(-2 * lambda_ * path_density) self.Q = Q # =========================================================== # setting up inversion operator Ginv = (Gt.C^-1.G + Q)^-1.Gt, # estimating model and setting up resolution matrix R = # Ginv.C^-1.G # =========================================================== # inversion operator if verbose: print "Setting up inversion operator (Ginv)" self.Ginv = (self.G.T * self.Cinv * self.G + self.Q).I * self.G.T # vector of best-fitting parameters if verbose: print "Estimating best-fitting parameters (mopt)" self.mopt = self.Ginv * self.Cinv * self.dobs # resolution matrix if verbose: print "Setting up {0} x {0} resolution matrix (R)".format(self.G.shape[1]) self.R = self.Ginv * self.Cinv * self.G # =========================================================== # Estimating spatial resolution at each node of the grid, # Rradius. # # The i-th row of the resolution matrix, R[i,:], contains the # resolution map associated with the i-th grid noe, that is, # the estimated model we would get if there were only a point # velocity anomaly at node nb i. So a cone centered on node # nb i is fitted to the resolution map, and its radius gives # an indication of the spatial resolution at node nb i (i.e., # the minimum distance at which two point anomalies can be # resolved) # =========================================================== if verbose: print "Estimation spatial resolution (Rradius)" self.Rradius = np.zeros(self.grid.n_nodes()) heights = np.zeros(self.grid.n_nodes()) for i, Ri in enumerate(np.array(self.R)): lon0, lat0 = self.grid.xy(i) # best-fitting cone at point (lon0, lat0) # Function returning the height of cone of radius *r0* # and peak *z0*, at a point located *r* km away from # the cone's center if resolution_fit.lower().strip() == 'cone': def cone_height(r, z0, r0): """ Cone """ return np.where(r < r0, z0 * (1 - r / r0), 0.0) elif resolution_fit.lower().strip() == 'gaussian': def cone_height(r, z0, r0): """ Gaussian function """ sigma = r0 / 2.0 return z0 * np.exp(- r**2 / (2 * sigma**2)) else: s = "Unknown function to fit resolution: '{}'" raise Exception(s.format(resolution_fit)) # distances between nodes and cone's center (lon0, lat0) lonnodes, latnodes = self.grid.xy_nodes() n = self.grid.n_nodes() rdata = psutils.dist(lons1=lonnodes, lats1=latnodes, lons2=n*[lon0], lats2=n*[lat0]) # best possible resolution *rmin* = 2 * inter-node distance # -> estimating *rmin* along the meridian crossing the cone's # center (conservative choice as it yields the largest # possible value) d2rad = np.pi / 180.0 rmin = 2 * d2rad * 6371.0 * max(self.grid.xstep * np.cos(lat0 * d2rad), self.grid.ystep) # fitting the above function to observed heights along nodes, # in array abs(Ri) popt, _ = curve_fit(f=cone_height, xdata=rdata, ydata=np.abs(Ri), p0=[1, 2*rmin], maxfev=10000) z0, r0 = popt # reslution cannot be better than *rmin* r0 = max(rmin, r0) # appending spatial resolution to array self.Rradius[i] = r0 heights[i] = z0 self.Rradius[heights < heights.max() * min_resolution_height] = np.nan if showplot: # potting maps of velocity perturbation, # path density and resolution _ = self.plot() def __repr__(self): """ E.g., "<Velocity map at period = 10 s>" """ return '<Velocity map at period = {} s>'.format(self.period) def path_density(self, window=(LONSTEP, LATSTEP)): """ Returns the path density, that is, on each node of the grid, the number of paths that cross the rectangular cell of size 
(window[0], window[1]) centered on the node. """ # initializing path density density = np.zeros(self.grid.n_nodes()) # coordinates of grid nodes and associated windows lons_nodes, lats_nodes = self.grid.xy_nodes() lons_min = np.expand_dims(lons_nodes - window[0] / 2.0, axis=-1) lons_max = np.expand_dims(lons_nodes + window[0] / 2.0, axis=-1) lats_min = np.expand_dims(lats_nodes - window[1] / 2.0, axis=-1) lats_max = np.expand_dims(lats_nodes + window[1] / 2.0, axis=-1) for path in self.paths: lons_path, lats_path = path[:, 0], path[:, 1] # are points of paths in windows? # 1st dim = grid nodes; 2nd dim = points along path points_in_windows = (lons_path >= lons_min) & (lons_path <= lons_max) & \ (lats_path >= lats_min) & (lats_path <= lats_max) density += np.any(points_in_windows, axis=-1) return density def traveltime_residuals(self, relative=False): """ Returns the [relative] differences between predicted-observed travel times at each pair of stations: differences = predicted - observed travel-time, = dpred - dobs, with dpred = G.mopt relative differences = (predicted - observed) / observed travel-time = (dpred - dobs) / (dobs + ref travel-time) @rtype: L{ndarray} """ # flattening differences as 1D array diffs = np.array(self.G * self.mopt - self.dobs).flatten() if not relative: return diffs else: ttref = np.array([c.dist() / self.v0 for c in self.disp_curves]) ttobs = np.array(self.dobs).flatten() + ttref # observed travel-times return diffs / ttobs def velocity_residuals(self, relative=False): """ Returns the [relative] differences between observed-predicted velocities (implied by travel times) at each pair of stations: differences = observed - predicted velocity, = observed - predicted (dist / travel time), @rtype: L{matrix} """ dists = np.array([c.dist() for c in self.disp_curves]) ttref = np.array([c.dist() / self.v0 for c in self.disp_curves]) ttobs = np.array(self.dobs).flatten() + ttref # observed travel-times ttpred = np.array(self.G * self.mopt).flatten() + ttref # predicted tt vobs = dists / ttobs # observed velocities vpred = dists / ttpred # predicted velocities if not relative: return vobs - vpred else: return (vobs - vpred) / vobs def checkerboard_func(self, vmid, vmin, vmax, squaresize, shape='cos'): """ Returns a checkerboard function, f(lons, lats), whose background value is *vmid*, and alternating min/max values are *vmin* and *vmax*. The centers of the anomalies are separated by *squaresize* (in km), and their shape is either 'gaussian' or 'cos'. @rtype: function """ # converting square size from km to degrees d2rad = np.pi / 180.0 midlat = 0.5 * (self.grid.ymin + self.grid.get_ymax()) latwidth = squaresize / 6371.0 / d2rad lonwidth = squaresize / (6371.0 * np.cos(midlat * d2rad)) / d2rad # Basis function defining an anomaly of # unit height centered at (*lon0*, *lat0*). if shape.lower().strip() == 'gaussian': def basis_func(lons, lats, lon0, lat0): """ Gausian anomaly , with sigma-parameter such that 3 sigma is the distance between the center and the border of the square, that is, half the distance between 2 centers. 
""" n = len(lons) r = psutils.dist(lons1=lons, lats1=lats, lons2=n*[lon0], lats2=n*[lat0]) sigma = squaresize / 6.0 return np.exp(- r**2 / (2 * sigma**2)) elif shape.lower().strip() == 'cos': def basis_func(lons, lats, lon0, lat0): """ Cosinus anomaly """ x = (lons - lon0) / lonwidth y = (lats - lat0) / latwidth outside_square = (np.abs(x) >= 0.5) | (np.abs(y) >= 0.5) return np.where(outside_square, 0.0, np.cos(np.pi*x) * np.cos(np.pi*y)) else: raise Exception("Unknown shape anomaly: " + shape) # coordinates of the center of the anomalies startlon = self.grid.xmin + lonwidth / 2.0 stoplon = self.grid.get_xmax() + lonwidth centerlons = list(np.arange(startlon, stoplon, lonwidth)) startlat = self.grid.ymin + latwidth / 2.0 stoplat = self.grid.get_ymax() + latwidth centerlats = list(np.arange(startlat, stoplat, latwidth)) centerlonlats = list(it.product(centerlons, centerlats)) # factors by which multiply the basis function associated # with each center (to alternate lows and highs) polarities = [(centerlons.index(lon) + centerlats.index(lat)) % 2 for lon, lat in centerlonlats] factors = np.where(np.array(polarities) == 1, vmax - vmid, vmin - vmid) def func(lons, lats): """ Checkboard function: sum of the basis functions along the centers defined above, times the high/low factor, plus background velocity. """ lowhighs = [f * basis_func(lons, lats, lon0, lat0) for f, (lon0, lat0) in zip(factors, centerlonlats)] return vmid + sum(lowhighs) return func def checkerboard_test(self, vmid, vmin, vmax, squaresize, **kwargs): """ Generates synthetic data (travel time perturbations), dsynth, from a checkerboard model of velocities, and performs a tomographic inversion on them: m = (Gt.C^-1.G + Q)^-1.Gt.C^-1.dsynth = Ginv.C^-1.dsynth Returns the vector of best-fitting parameters, m. @rtype: L{matrix} """ # checkerboard function f_checkerboard = self.checkerboard_func(vmid, vmin, vmax, squaresize, **kwargs) # setting up vector of synthetic data dsynth = np.zeros_like(self.dobs) for d, path, curve in zip(dsynth, self.paths, self.disp_curves): # array of infinitesimal distances along path lons, lats = path[:, 0], path[:, 1] ds = psutils.dist(lons1=lons[:-1], lats1=lats[:-1], lons2=lons[1:], lats2=lats[1:]) # velocities along path v = f_checkerboard(lons, lats) # travel time = integral[ds / v] t = np.sum(ds * 0.5 * (1.0 / v[:-1] + 1.0 / v[1:])) # synthetic data = travel time - ref travel time d[...] = t - curve.dist() / vmid # inverting synthetic data m = self.Ginv * self.Cinv * dsynth return m def plot(self, xsize=20, title=None, showplot=True, outfile=None, **kwargs): """ Plots velocity perturbation, path density and spatial resolution, and returns the figure. 
Additional keyword args in *kwargs* are sent to self.plot_velocity(), self.plot_pathdensity() and self.plot_resolution(), when applicable @rtype: L{matplotlib.figure.Figure} """ # bounding box bbox = self.grid.bbox() aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0]) figsize = (xsize, aspectratio * xsize / 3.0 + 2) fig = plt.figure(figsize=figsize) # layout gs = gridspec.GridSpec(1, 3, wspace=0.0, hspace=0.0) # plotting velocity perturbation ax = fig.add_subplot(gs[0, 0]) subkwargs = {'ax': ax, 'plot_title': False} # sending additional arguments (when applicable) subkwargs.update({k: kwargs[k] for k in getargspec(self.plot_velocity).args if k in kwargs}) self.plot_velocity(**subkwargs) # plotting path density ax = fig.add_subplot(gs[0, 1]) subkwargs = {'ax': ax, 'plot_title': False, 'stationlabel': True} # sending additional arguments (when applicable) subkwargs.update({k: kwargs[k] for k in getargspec(self.plot_pathdensity).args if k in kwargs}) self.plot_pathdensity(**subkwargs) # plotting spatial resolution ax = fig.add_subplot(gs[0, 2]) subkwargs = {'ax': ax, 'plot_title': False} # sending additional arguments (when applicable) subkwargs.update({k: kwargs[k] for k in getargspec(self.plot_resolution).args if k in kwargs}) self.plot_resolution(**subkwargs) # fig title if not title: # default title if not given title = u'Period = {} s, {} paths' title = title.format(self.period, len(self.paths)) fig.suptitle(title, fontsize=16) gs.tight_layout(fig, rect=[0, 0, 1, 0.95]) # saving figure if outfile: if os.path.exists(outfile): # backup shutil.copyfile(outfile, outfile + '~') fig.set_size_inches(figsize) fig.savefig(outfile, dpi=300) # showing figure if showplot: fig.show() return fig def network_plot(self, ax=None, xsize=10, plotdensity=True, plotpaths=True, stationlabel=False, plot_title=True, showgrid=False, highlight_residuals_gt=None): """ Plots network of stations using basemap rather than shapefiles! Also has the option to choose whether or not you want to plot station pair paths or not """ # bounding box bbox = self.grid.bbox() # creating figure if not given as input fig = None if not ax: aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0]) # xzise has not effect if axes are given as input fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True) ax = fig.add_subplot(111) # plotting coasts and tectonic provinces psutils.basemap(ax=ax, labels=False, fill=not plotdensity, bbox=bbox) if plotpaths: # residuals observed/predicted travel-times res = self.traveltime_residuals() if highlight_residuals_gt else [] # plotting paths for i, path in enumerate(self.paths): x, y = zip(*path) linestyle = {'color': 'grey', 'lw': 0.5} if highlight_residuals_gt and abs(float(res[i])) > highlight_residuals_gt: # highlighting line as the travel-time error is > threshold linestyle = {'color': 'black', 'lw': 1.5} ax.plot(x, y, '-', **linestyle) if showgrid: # plotting grid x, y = self.grid.xy_nodes() ax.plot(x, y, '+') # plotting stations self._plot_stations(ax, stationlabel=stationlabel) # formatting axes ax.set_xlim(bbox[:2]) ax.set_ylim(bbox[2:]) if plot_title: ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths))) if fig: fig.show() def plot_pathdensity(self, ax=None, xsize=10, plotdensity=True, plotpaths=True, stationlabel=False, plot_title=True, showgrid=False, highlight_residuals_gt=None): """ Plots path density and/or interstation paths. 
Paths for which the residual observed/predicted travel-time is greater than *highlight_residuals_gt* (if defined) are highlighted as bold lines. """ # bounding box bbox = self.grid.bbox() # creating figure if not given as input fig = None if not ax: aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0]) # xzise has not effect if axes are given as input fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True) ax = fig.add_subplot(111) # plotting coasts and tectonic provinces psutils.basemap(ax=ax, labels=False, fill=not plotdensity, bbox=bbox) if plotdensity: # plotting path density d = self.grid.to_2D_array(self.density) extent = (self.grid.xmin, self.grid.get_xmax(), self.grid.ymin, self.grid.get_ymax()) m = ax.imshow(d.transpose(), origin='bottom', extent=extent, interpolation='bicubic', cmap=CMAP_DENSITY, vmin=0) c = plt.colorbar(m, ax=ax, orientation='horizontal', pad=0.1) c.set_label('Path density') if plotpaths: # residuals observed/predicted travel-times res = self.traveltime_residuals() if highlight_residuals_gt else [] # plotting paths for i, path in enumerate(self.paths): x, y = zip(*path) linestyle = {'color': 'grey', 'lw': 0.5} if highlight_residuals_gt and abs(float(res[i])) > highlight_residuals_gt: # highlighting line as the travel-time error is > threshold linestyle = {'color': 'black', 'lw': 1.5} ax.plot(x, y, '-', **linestyle) if showgrid: # plotting grid x, y = self.grid.xy_nodes() ax.plot(x, y, '+') # plotting stations self._plot_stations(ax, stationlabel=stationlabel) # formatting axes ax.set_xlim(bbox[:2]) ax.set_ylim(bbox[2:]) if plot_title: ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths))) if fig: fig.show() def plot_velocity(self, ax=None, xsize=10, perturbation=False, plot_title=True, vscale=None): """ Plots velocity or perturbation relative to mean velocity (which is not necessarily the reference velocity) """ # bounding box bbox = self.grid.bbox() # creating figure if not given as input fig = None if not ax: aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0]) # xzise has not effect if axes are given as input fig = plt.figure(figsize=(xsize, aspectratio * xsize)) ax = fig.add_subplot(111) # plotting coasts and tectonic provinces psutils.basemap(ax=ax, labels=False, fill=False, bbox=bbox) # plotting stations self._plot_stations(ax, stationlabel=False) # velocities on grid: m = (v0 - v) / v, so v = v0 / (1 + m) v = self.grid.to_2D_array(self.v0 / (1 + self.mopt)) vmean = v.mean() if perturbation: # plotting % perturbation relative to mean velocity v = 100 * (v - vmean) / vmean if not vscale and perturbation: # symetric scale maxdv = np.abs(v).max() vscale = (-maxdv, maxdv) elif not vscale and not perturbation: # scale centered on mean velocity maxdv = np.abs(v - vmean).max() vscale = (vmean - maxdv, vmean + maxdv) extent = (self.grid.xmin, self.grid.get_xmax(), self.grid.ymin, self.grid.get_ymax()) m = ax.imshow(v.transpose(), origin='bottom', extent=extent, interpolation='bicubic', cmap=CMAP_SEISMIC, vmin=vscale[0], vmax=vscale[1]) c = plt.colorbar(m, ax=ax, orientation='horizontal', pad=0.1) c.set_label('Velocity perturbation (%)' if perturbation else 'Velocity (km/s)') # formatting axes ax.set_xlim(bbox[:2]) ax.set_ylim(bbox[2:]) if plot_title: ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths))) if fig: fig.show() def plot_resolution(self, ax=None, xsize=10, plot_title=True): """ Plots resolution map """ # bounding box bbox = self.grid.bbox() # creating figure if not given as input fig = 
None if not ax: aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0]) # xzise has not effect if axes are given as input fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True) ax = fig.add_subplot(111) # plotting coasts and tectonic provinces psutils.basemap(ax=ax, labels=False, fill=False, bbox=bbox) # plotting stations self._plot_stations(ax, stationlabel=False) # plotting spatial resolution r = self.grid.to_2D_array(self.Rradius) extent = (self.grid.xmin, self.grid.get_xmax(), self.grid.ymin, self.grid.get_ymax()) m = ax.imshow(r.transpose(), origin='bottom', extent=extent, interpolation='bicubic', cmap=CMAP_RESOLUTION) c = plt.colorbar(m, ax=ax, orientation='horizontal', pad=0.1) c.set_label('Spatial resolution (km)') # formatting axes ax.set_xlim(bbox[:2]) ax.set_ylim(bbox[2:]) if plot_title: ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths))) if fig: fig.show() def plot_checkerboard(self, vmid, vmin, vmax, squaresize, axes=None, xsize=10, **kwargs): """ Plots checkboard model and reconstructed checkerboard """ # checkerboard test m = self.checkerboard_test(vmid, vmin, vmax, squaresize, **kwargs) v = self.grid.to_2D_array(vmid / (1 + m)) dv = 100 * (v - vmid) / vmid # bounding box bbox = self.grid.bbox() # creating figure if not given as input fig = None if not axes: aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0]) # xzise has not effect if axes are given as input fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True) axes = [fig.add_subplot(121), fig.add_subplot(122)] ims = [] # checkerboard model checkerboard_func = self.checkerboard_func(vmid, vmin, vmax, squaresize, **kwargs) lons, lats = self.grid.xy_nodes() a = self.grid.to_2D_array(checkerboard_func(lons, lats)) extent = (self.grid.xmin, self.grid.get_xmax(), self.grid.ymin, self.grid.get_ymax()) im = axes[0].imshow(a.transpose(), origin='bottom', extent=extent, interpolation='bicubic', vmin=vmin, vmax=vmax, cmap=CMAP_SEISMIC) ims.append(im) # reconstructed checkerboard extent = (self.grid.xmin, self.grid.get_xmax(), self.grid.ymin, self.grid.get_ymax()) im = axes[1].imshow(dv.transpose(), origin='bottom', extent=extent, interpolation='bicubic', vmin=-np.abs(dv).max(), vmax=np.abs(dv).max(), cmap=CMAP_SEISMIC) ims.append(im) for ax, im in zip(axes, ims): # coasts and tectonic provinces psutils.basemap(ax=ax, labels=False, fill=False, bbox=bbox) # stations self._plot_stations(ax, stationlabel=False) # color bar c = plt.colorbar(im, ax=ax, orientation='horizontal', pad=0.1) c.set_label('km/s' if ax is axes[0] else '% perturbation') # limits ax.set_xlim(bbox[:2]) ax.set_ylim(bbox[2:]) if fig: fig.show() def _plot_stations(self, ax, stationlabel): """ Plots stations on map """ # plotting stations xylabels = [c.station1.coord + (c.station1.name,) for c in self.disp_curves] + \ [c.station2.coord + (c.station2.name,) for c in self.disp_curves] xlist, ylist, labels = zip(*list(set(xylabels))) ax.plot(xlist, ylist, '^', color='k', ms=10, mfc='w', mew=1) if not stationlabel: return # stations label for x, y, label in zip(xlist, ylist, labels): ax.text(x, y, label, ha='center', va='bottom', fontsize=10, weight='bold') def pathdensity_colormap(dmax): """ Builds a colormap for path density (d) varying from 0 to *dmax*: - white for d = 0 - blue to green for 1 <= d <= 5 - green to red for 5 <= d <= 10 - red to black for 10 <= d <= dmax """ dmax = max(dmax, 11) x1 = 1.0 / dmax x2 = 5.0 / dmax x3 = 10.0 / dmax cdict = {'red': ((0, 1, 1), (x1, 0, 0), (x2, 0, 0), (x3, 1, 1), 
(1, 0, 0)),
             'green': ((0, 1, 1), (x1, 0, 0), (x2, 1, 1), (x3, 0, 0), (1, 0, 0)),
             'blue': ((0, 1, 1), (x1, 1, 1), (x2, 0, 0), (x3, 0, 0), (1, 0, 0))}
    return LinearSegmentedColormap('tmp', cdict)


if __name__ == '__main__':
    # importing dir of FTAN results
    from psconfig import FTAN_DIR

    # loading dispersion curves
    flist = sorted(glob.glob(os.path.join(FTAN_DIR, 'FTAN*.pickle*')))
    print 'Select file containing dispersion curves:'
    print '\n'.join('{} - {}'.format(i, os.path.basename(f))
                    for i, f in enumerate(flist))
    pickle_file = flist[int(raw_input('\n'))]
    with open(pickle_file, 'rb') as f:
        curves = pickle.load(f)
    print "Dispersion curves stored in variable 'curves'"
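# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original module): the constructor above
# assembles the damped least-squares solution
#     mopt = (G.T * Cinv * G + Q)^-1 * G.T * Cinv * dobs
# The helper below reproduces that single linear-algebra step with plain
# numpy arrays so the formula can be checked in isolation.  The argument
# names mirror the attributes built above but are otherwise hypothetical.

def _demo_damped_least_squares(G, Cinv, Q, dobs):
    """Toy re-implementation of mopt = (Gt.Cinv.G + Q)^-1 . Gt.Cinv.dobs"""
    import numpy as np
    G = np.asarray(G)
    Cinv = np.asarray(Cinv)
    Q = np.asarray(Q)
    dobs = np.asarray(dobs).ravel()
    # inversion operator Ginv = (Gt.Cinv.G + Q)^-1 . Gt, as in the class above
    Ginv = np.linalg.inv(G.T.dot(Cinv).dot(G) + Q).dot(G.T)
    return Ginv.dot(Cinv).dot(dobs)

# Example with identity data kernel/covariance and light damping:
#   _demo_damped_least_squares(np.eye(3), np.eye(3), 0.1 * np.eye(3), np.ones(3))
# returns three equal values close to 1 / 1.1, showing how Q shrinks the model.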
gpl-3.0
alephu5/Soundbyte
environment/lib/python3.3/site-packages/pandas/stats/var.py
16
16319
from __future__ import division from pandas.compat import range, lrange, zip, reduce from pandas import compat import numpy as np from pandas.core.base import StringMixin from pandas.util.decorators import cache_readonly from pandas.core.frame import DataFrame from pandas.core.panel import Panel from pandas.core.series import Series import pandas.stats.common as common from pandas.stats.math import inv from pandas.stats.ols import _combine_rhs class VAR(StringMixin): """ Estimates VAR(p) regression on multivariate time series data presented in pandas data structures. Parameters ---------- data : DataFrame or dict of Series p : lags to include """ def __init__(self, data, p=1, intercept=True): try: import statsmodels.tsa.vector_ar.api as sm_var except ImportError: import scikits.statsmodels.tsa.var as sm_var self._data = DataFrame(_combine_rhs(data)) self._p = p self._columns = self._data.columns self._index = self._data.index self._intercept = intercept @cache_readonly def aic(self): """Returns the Akaike information criterion.""" return self._ic['aic'] @cache_readonly def bic(self): """Returns the Bayesian information criterion.""" return self._ic['bic'] @cache_readonly def beta(self): """ Returns a DataFrame, where each column x1 contains the betas calculated by regressing the x1 column of the VAR input with the lagged input. Returns ------- DataFrame """ d = dict([(key, value.beta) for (key, value) in compat.iteritems(self.ols_results)]) return DataFrame(d) def forecast(self, h): """ Returns a DataFrame containing the forecasts for 1, 2, ..., n time steps. Each column x1 contains the forecasts of the x1 column. Parameters ---------- n: int Number of time steps ahead to forecast. Returns ------- DataFrame """ forecast = self._forecast_raw(h)[:, 0, :] return DataFrame(forecast, index=lrange(1, 1 + h), columns=self._columns) def forecast_cov(self, h): """ Returns the covariance of the forecast residuals. Returns ------- DataFrame """ return [DataFrame(value, index=self._columns, columns=self._columns) for value in self._forecast_cov_raw(h)] def forecast_std_err(self, h): """ Returns the standard errors of the forecast residuals. Returns ------- DataFrame """ return DataFrame(self._forecast_std_err_raw(h), index=lrange(1, 1 + h), columns=self._columns) @cache_readonly def granger_causality(self): """Returns the f-stats and p-values from the Granger Causality Test. If the data consists of columns x1, x2, x3, then we perform the following regressions: x1 ~ L(x2, x3) x1 ~ L(x1, x3) x1 ~ L(x1, x2) The f-stats of these results are placed in the 'x1' column of the returned DataFrame. We then repeat for x2, x3. Returns ------- Dict, where 'f-stat' returns the DataFrame containing the f-stats, and 'p-value' returns the DataFrame containing the corresponding p-values of the f-stats. 
""" from pandas.stats.api import ols from scipy.stats import f d = {} for col in self._columns: d[col] = {} for i in range(1, 1 + self._p): lagged_data = self._lagged_data[i].filter( self._columns - [col]) for key, value in compat.iteritems(lagged_data): d[col][_make_param_name(i, key)] = value f_stat_dict = {} p_value_dict = {} for col, y in compat.iteritems(self._data): ssr_full = (self.resid[col] ** 2).sum() f_stats = [] p_values = [] for col2 in self._columns: result = ols(y=y, x=d[col2]) resid = result.resid ssr_reduced = (resid ** 2).sum() M = self._p N = self._nobs K = self._k * self._p + 1 f_stat = ((ssr_reduced - ssr_full) / M) / (ssr_full / (N - K)) f_stats.append(f_stat) p_value = f.sf(f_stat, M, N - K) p_values.append(p_value) f_stat_dict[col] = Series(f_stats, self._columns) p_value_dict[col] = Series(p_values, self._columns) f_stat_mat = DataFrame(f_stat_dict) p_value_mat = DataFrame(p_value_dict) return { 'f-stat': f_stat_mat, 'p-value': p_value_mat, } @cache_readonly def ols_results(self): """ Returns the results of the regressions: x_1 ~ L(X) x_2 ~ L(X) ... x_k ~ L(X) where X = [x_1, x_2, ..., x_k] and L(X) represents the columns of X lagged 1, 2, ..., n lags (n is the user-provided number of lags). Returns ------- dict """ from pandas.stats.api import ols d = {} for i in range(1, 1 + self._p): for col, series in compat.iteritems(self._lagged_data[i]): d[_make_param_name(i, col)] = series result = dict([(col, ols(y=y, x=d, intercept=self._intercept)) for col, y in compat.iteritems(self._data)]) return result @cache_readonly def resid(self): """ Returns the DataFrame containing the residuals of the VAR regressions. Each column x1 contains the residuals generated by regressing the x1 column of the input against the lagged input. Returns ------- DataFrame """ d = dict([(col, series.resid) for (col, series) in compat.iteritems(self.ols_results)]) return DataFrame(d, index=self._index) @cache_readonly def summary(self): template = """ %(banner_top)s Number of Observations: %(nobs)d AIC: %(aic).3f BIC: %(bic).3f %(banner_coef)s %(coef_table)s %(banner_end)s """ params = { 'banner_top': common.banner('Summary of VAR'), 'banner_coef': common.banner('Summary of Estimated Coefficients'), 'banner_end': common.banner('End of Summary'), 'coef_table': self.beta, 'aic': self.aic, 'bic': self.bic, 'nobs': self._nobs, } return template % params @cache_readonly def _alpha(self): """ Returns array where the i-th element contains the intercept when regressing the i-th column of self._data with the lagged data. """ if self._intercept: return self._beta_raw[-1] else: return np.zeros(self._k) @cache_readonly def _beta_raw(self): return np.array([list(self.beta[col].values()) for col in self._columns]).T def _trans_B(self, h): """ Returns 0, 1, ..., (h-1)-th power of transpose of B as defined in equation (4) on p. 142 of the Stata 11 Time Series reference book. 
""" result = [np.eye(1 + self._k * self._p)] row1 = np.zeros((1, 1 + self._k * self._p)) row1[0, 0] = 1 v = self._alpha.reshape((self._k, 1)) row2 = np.hstack(tuple([v] + self._lag_betas)) m = self._k * (self._p - 1) row3 = np.hstack(( np.zeros((m, 1)), np.eye(m), np.zeros((m, self._k)) )) trans_B = np.vstack((row1, row2, row3)).T result.append(trans_B) for i in range(2, h): result.append(np.dot(trans_B, result[i - 1])) return result @cache_readonly def _x(self): values = np.array([ list(self._lagged_data[i][col].values()) for i in range(1, 1 + self._p) for col in self._columns ]).T x = np.hstack((np.ones((len(values), 1)), values))[self._p:] return x @cache_readonly def _cov_beta(self): cov_resid = self._sigma x = self._x inv_cov_x = inv(np.dot(x.T, x)) return np.kron(inv_cov_x, cov_resid) def _data_xs(self, i): """ Returns the cross-section of the data at the given timestep. """ return self._data.values[i] def _forecast_cov_raw(self, n): resid = self._forecast_cov_resid_raw(n) # beta = self._forecast_cov_beta_raw(n) # return [a + b for a, b in zip(resid, beta)] # TODO: ignore the beta forecast std err until it's verified return resid def _forecast_cov_beta_raw(self, n): """ Returns the covariance of the beta errors for the forecast at 1, 2, ..., n timesteps. """ p = self._p values = self._data.values T = len(values) - self._p - 1 results = [] for h in range(1, n + 1): psi = self._psi(h) trans_B = self._trans_B(h) sum = 0 cov_beta = self._cov_beta for t in range(T + 1): index = t + p y = values.take(lrange(index, index - p, -1), axis=0).ravel() trans_Z = np.hstack(([1], y)) trans_Z = trans_Z.reshape(1, len(trans_Z)) sum2 = 0 for i in range(h): ZB = np.dot(trans_Z, trans_B[h - 1 - i]) prod = np.kron(ZB, psi[i]) sum2 = sum2 + prod sum = sum + chain_dot(sum2, cov_beta, sum2.T) results.append(sum / (T + 1)) return results def _forecast_cov_resid_raw(self, h): """ Returns the covariance of the residual errors for the forecast at 1, 2, ..., h timesteps. """ psi_values = self._psi(h) sum = 0 result = [] for i in range(h): psi = psi_values[i] sum = sum + chain_dot(psi, self._sigma, psi.T) result.append(sum) return result def _forecast_raw(self, h): """ Returns the forecast at 1, 2, ..., h timesteps in the future. """ k = self._k result = [] for i in range(h): sum = self._alpha.reshape(1, k) for j in range(self._p): beta = self._lag_betas[j] idx = i - j if idx > 0: y = result[idx - 1] else: y = self._data_xs(idx - 1) sum = sum + np.dot(beta, y.T).T result.append(sum) return np.array(result) def _forecast_std_err_raw(self, h): """ Returns the standard error of the forecasts at 1, 2, ..., n timesteps. """ return np.array([np.sqrt(np.diag(value)) for value in self._forecast_cov_raw(h)]) @cache_readonly def _ic(self): """ Returns the Akaike/Bayesian information criteria. """ RSS = self._rss k = self._p * (self._k * self._p + 1) n = self._nobs * self._k return {'aic': 2 * k + n * np.log(RSS / n), 'bic': n * np.log(RSS / n) + k * np.log(n)} @cache_readonly def _k(self): return len(self._columns) @cache_readonly def _lag_betas(self): """ Returns list of B_i, where B_i represents the (k, k) matrix with the j-th row containing the betas of regressing the j-th column of self._data with self._data lagged i time steps. First element is B_1, second element is B_2, etc. 
""" k = self._k b = self._beta_raw return [b[k * i: k * (i + 1)].T for i in range(self._p)] @cache_readonly def _lagged_data(self): return dict([(i, self._data.shift(i)) for i in range(1, 1 + self._p)]) @cache_readonly def _nobs(self): return len(self._data) - self._p def _psi(self, h): """ psi value used for calculating standard error. Returns [psi_0, psi_1, ..., psi_(h - 1)] """ k = self._k result = [np.eye(k)] for i in range(1, h): result.append(sum( [np.dot(result[i - j], self._lag_betas[j - 1]) for j in range(1, 1 + i) if j <= self._p])) return result @cache_readonly def _resid_raw(self): resid = np.array([self.ols_results[col]._resid_raw for col in self._columns]) return resid @cache_readonly def _rss(self): """Returns the sum of the squares of the residuals.""" return (self._resid_raw ** 2).sum() @cache_readonly def _sigma(self): """Returns covariance of resids.""" k = self._k n = self._nobs resid = self._resid_raw return np.dot(resid, resid.T) / (n - k) def __unicode__(self): return self.summary def lag_select(data, max_lags=5, ic=None): """ Select number of lags based on a variety of information criteria Parameters ---------- data : DataFrame-like max_lags : int Maximum number of lags to evaluate ic : {None, 'aic', 'bic', ...} Choosing None will just display the results Returns ------- None """ pass class PanelVAR(VAR): """ Performs Vector Autoregression on panel data. Parameters ---------- data: Panel or dict of DataFrame lags: int """ def __init__(self, data, lags, intercept=True): self._data = _prep_panel_data(data) self._p = lags self._intercept = intercept self._columns = self._data.items @cache_readonly def _nobs(self): """Returns the number of observations.""" _, timesteps, entities = self._data.values.shape return (timesteps - self._p) * entities @cache_readonly def _rss(self): """Returns the sum of the squares of the residuals.""" return (self.resid.values ** 2).sum() def forecast(self, h): """ Returns the forecasts at 1, 2, ..., n timesteps in the future. """ forecast = self._forecast_raw(h).T.swapaxes(1, 2) index = lrange(1, 1 + h) w = Panel(forecast, items=self._data.items, major_axis=index, minor_axis=self._data.minor_axis) return w @cache_readonly def resid(self): """ Returns the DataFrame containing the residuals of the VAR regressions. Each column x1 contains the residuals generated by regressing the x1 column of the input against the lagged input. Returns ------- DataFrame """ d = dict([(key, value.resid) for (key, value) in compat.iteritems(self.ols_results)]) return Panel.fromDict(d) def _data_xs(self, i): return self._data.values[:, i, :].T @cache_readonly def _sigma(self): """Returns covariance of resids.""" k = self._k resid = _drop_incomplete_rows(self.resid.toLong().values) n = len(resid) return np.dot(resid.T, resid) / (n - k) def _prep_panel_data(data): """Converts the given data into a Panel.""" if isinstance(data, Panel): return data return Panel.fromDict(data) def _drop_incomplete_rows(array): mask = np.isfinite(array).all(1) indices = np.arange(len(array))[mask] return array.take(indices, 0) def _make_param_name(lag, name): return 'L%d.%s' % (lag, name) def chain_dot(*matrices): """ Returns the dot product of the given matrices. Parameters ---------- matrices: argument list of ndarray """ return reduce(lambda x, y: np.dot(y, x), matrices[::-1])
gpl-3.0
ryandougherty/mwa-capstone
MWA_Tools/build/matplotlib/examples/api/logo2.py
3
2716
""" Thanks to Tony Yu <[email protected]> for the logo design """ import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.cm as cm import matplotlib.mlab as mlab from pylab import rand mpl.rcParams['xtick.labelsize'] = 10 mpl.rcParams['ytick.labelsize'] = 12 mpl.rcParams['axes.edgecolor'] = 'gray' axalpha = 0.05 #figcolor = '#EFEFEF' figcolor = 'white' dpi = 80 fig = plt.figure(figsize=(6, 1.1),dpi=dpi) fig.figurePatch.set_edgecolor(figcolor) fig.figurePatch.set_facecolor(figcolor) def add_math_background(): ax = fig.add_axes([0., 0., 1., 1.]) text = [] text.append((r"$W^{3\beta}_{\delta_1 \rho_1 \sigma_2} = U^{3\beta}_{\delta_1 \rho_1} + \frac{1}{8 \pi 2} \int^{\alpha_2}_{\alpha_2} d \alpha^\prime_2 \left[\frac{ U^{2\beta}_{\delta_1 \rho_1} - \alpha^\prime_2U^{1\beta}_{\rho_1 \sigma_2} }{U^{0\beta}_{\rho_1 \sigma_2}}\right]$", (0.7, 0.2), 20)) text.append((r"$\frac{d\rho}{d t} + \rho \vec{v}\cdot\nabla\vec{v} = -\nabla p + \mu\nabla^2 \vec{v} + \rho \vec{g}$", (0.35, 0.9), 20)) text.append((r"$\int_{-\infty}^\infty e^{-x^2}dx=\sqrt{\pi}$", (0.15, 0.3), 25)) #text.append((r"$E = mc^2 = \sqrt{{m_0}^2c^4 + p^2c^2}$", # (0.7, 0.42), 30)) text.append((r"$F_G = G\frac{m_1m_2}{r^2}$", (0.85, 0.7), 30)) for eq, (x, y), size in text: ax.text(x, y, eq, ha='center', va='center', color="#11557c", alpha=0.25, transform=ax.transAxes, fontsize=size) ax.set_axis_off() return ax def add_matplotlib_text(ax): ax.text(0.95, 0.5, 'matplotlib', color='#11557c', fontsize=65, ha='right', va='center', alpha=1.0, transform=ax.transAxes) def add_polar_bar(): ax = fig.add_axes([0.025, 0.075, 0.2, 0.85], polar=True, resolution=50) ax.axesPatch.set_alpha(axalpha) ax.set_axisbelow(True) N = 7 arc = 2. * np.pi theta = np.arange(0.0, arc, arc/N) radii = 10 * np.array([0.2, 0.6, 0.8, 0.7, 0.4, 0.5, 0.8]) width = np.pi / 4 * np.array([0.4, 0.4, 0.6, 0.8, 0.2, 0.5, 0.3]) bars = ax.bar(theta, radii, width=width, bottom=0.0) for r, bar in zip(radii, bars): bar.set_facecolor(cm.jet(r/10.)) bar.set_alpha(0.6) for label in ax.get_xticklabels() + ax.get_yticklabels(): label.set_visible(False) for line in ax.get_ygridlines() + ax.get_xgridlines(): line.set_lw(0.8) line.set_alpha(0.9) line.set_ls('-') line.set_color('0.5') ax.set_yticks(np.arange(1, 9, 2)) ax.set_rmax(9) if __name__ == '__main__': main_axes = add_math_background() add_polar_bar() add_matplotlib_text(main_axes) plt.show()
gpl-2.0
abimannans/scikit-learn
sklearn/utils/tests/test_murmurhash.py
261
2836
# Author: Olivier Grisel <[email protected]> # # License: BSD 3 clause import numpy as np from sklearn.externals.six import b, u from sklearn.utils.murmurhash import murmurhash3_32 from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_equal from nose.tools import assert_equal, assert_true def test_mmhash3_int(): assert_equal(murmurhash3_32(3), 847579505) assert_equal(murmurhash3_32(3, seed=0), 847579505) assert_equal(murmurhash3_32(3, seed=42), -1823081949) assert_equal(murmurhash3_32(3, positive=False), 847579505) assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505) assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949) assert_equal(murmurhash3_32(3, positive=True), 847579505) assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505) assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347) def test_mmhash3_int_array(): rng = np.random.RandomState(42) keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32) keys = keys.reshape((3, 2, 1)) for seed in [0, 42]: expected = np.array([murmurhash3_32(int(k), seed) for k in keys.flat]) expected = expected.reshape(keys.shape) assert_array_equal(murmurhash3_32(keys, seed), expected) for seed in [0, 42]: expected = np.array([murmurhash3_32(k, seed, positive=True) for k in keys.flat]) expected = expected.reshape(keys.shape) assert_array_equal(murmurhash3_32(keys, seed, positive=True), expected) def test_mmhash3_bytes(): assert_equal(murmurhash3_32(b('foo'), 0), -156908512) assert_equal(murmurhash3_32(b('foo'), 42), -1322301282) assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784) assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014) def test_mmhash3_unicode(): assert_equal(murmurhash3_32(u('foo'), 0), -156908512) assert_equal(murmurhash3_32(u('foo'), 42), -1322301282) assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784) assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014) def test_no_collision_on_byte_range(): previous_hashes = set() for i in range(100): h = murmurhash3_32(' ' * i, 0) assert_true(h not in previous_hashes, "Found collision on growing empty string") def test_uniform_distribution(): n_bins, n_samples = 10, 100000 bins = np.zeros(n_bins, dtype=np.float) for i in range(n_samples): bins[murmurhash3_32(i, positive=True) % n_bins] += 1 means = bins / n_samples expected = np.ones(n_bins) / n_bins assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
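# ---------------------------------------------------------------------------
# Editorial sketch (not a scikit-learn API): the positive 32-bit variant
# exercised above is what makes murmurhash3_32 convenient for feature
# hashing -- mapping arbitrary tokens to a stable bucket index in
# [0, n_buckets).  The helper below is a hypothetical illustration.
def bucket_index(token, n_buckets=2 ** 18, seed=0):
    """Deterministic bucket for *token* using MurmurHash3 (illustrative)."""
    return murmurhash3_32(token, seed=seed, positive=True) % n_buckets

# e.g. bucket_index(b('foo')) == 4138058784 % 2 ** 18, using the hash value
# asserted in test_mmhash3_bytes above.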
bsd-3-clause
dsavransky/EXOSIMS
EXOSIMS/util/plotPlanetPopRvsAandDetectedRvsA.py
1
24776
""" This plotting utility creates joint probability distributions of planetary radius vs semi-major axis for the SimulatedUniverse specified by outspec.json and the detected planet population aggregated from all .pkl files in 'folder'. Side histograms represent occurence frequency of the parameter per simulation. In the grid version, the number represents the summation of values in each cell. Plot Planet Population Radius vs a AND Detected Planet Rp vs a Plot will be saved to the directory specified by PPoutpath Written by Dean Keithly on 5/6/2018 Updated 2/7/2019 """ import random as myRand import sys, os.path, EXOSIMS, EXOSIMS.MissionSim try: import cPickle as pickle except: import pickle import os import numpy as np #from pylab import * from numpy import nan if not 'DISPLAY' in os.environ.keys(): #Check environment for keys import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt else: import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import argparse import json from EXOSIMS.util.read_ipcluster_ensemble import gen_summary from EXOSIMS.util.read_ipcluster_ensemble import read_all from numpy import linspace from matplotlib.ticker import NullFormatter, MaxNLocator from matplotlib import ticker import astropy.units as u import matplotlib.patheffects as PathEffects import datetime import re from EXOSIMS.util.vprint import vprint from matplotlib.colors import LogNorm class plotPlanetPopRvsAandDetectedRvsA(object): """Designed to plot Rp vs a of Planet Population Generated and Planet Population Observed """ _modtype = 'util' def __init__(self, args=None): """ Args: args (dict) - 'file' keyword specifies specific pkl file to use """ self.args = args pass def singleRunPostProcessing(self, PPoutpath, folder): """Generates a single yield histogram for the run_type Args: PPoutpath (string) - output path to place data in folder (string) - full filepath to folder containing runs """ if not os.path.exists(folder):#Folder must exist raise ValueError('%s not found'%folder) if not os.path.exists(PPoutpath):#PPoutpath must exist raise ValueError('%s not found'%PPoutpath) outspecfile = os.path.join(folder,'outspec.json') if not os.path.exists(outspecfile):#outspec file not found raise ValueError('%s not found'%outspecfile) #Extract Data from folder containing pkl files out = gen_summary(folder)#out contains information on the detected planets allres = read_all(folder)# contains all drm from all missions in folder #Convert Extracted Data to x,y x, y, z = self.extractXY(out, allres) # Define the x and y data for detected planets det_Rps = np.concatenate(out['Rps']).ravel() # Planet Radius in Earth Radius of detected planets det_smas = np.concatenate(out['smas']).ravel() det_eccens = np.concatenate(out['es']).ravel() #Create Mission Object To Extract Some Plotting Limits sim = EXOSIMS.MissionSim.MissionSim(outspecfile, nopar=True) ymax = np.nanmax(sim.PlanetPhysicalModel.ggdat['radii']).to('earthRad').value ################ #Create Figure and define gridspec fig2 = plt.figure(2, figsize=(8.5,4.5)) gs = gridspec.GridSpec(3,5, width_ratios=[6,1,0.3,6,1.25], height_ratios=[0.2,1,4]) gs.update(wspace=0.06, hspace=0.06) # set the spacing between axes. 
plt.rc('axes',linewidth=2) plt.rc('lines',linewidth=2) plt.rcParams['axes.linewidth']=2 plt.rc('font',weight='bold') #What the plot layout looks like ###----------------------------------- # | gs[0] gs[1] gs[2] gs[3] gs[4] | # | gs[5] gs[6] gs[7] gs[8] gs[9] | # | gs[10] gs[11] gs[12] gs[13] gs[14]| ###----------------------------------- ax1 = plt.subplot(gs[5+5])#2D histogram of planet pop ax2 = plt.subplot(gs[0+5])#1D histogram of a ax3 = plt.subplot(gs[6+5])#1D histogram of Rp ax4 = plt.subplot(gs[8+5])#2D histogram of detected Planet Population ax5 = plt.subplot(gs[3+5])#1D histogram of detected planet a ax6 = plt.subplot(gs[9+5])#1D histogram of detected planet Rp TXT1 = plt.subplot(gs[1+5]) TXT4 = plt.subplot(gs[4+5]) axCBAR = plt.subplot(gs[0:5]) # Set up default x and y limits print(min(x)) xlims = [sim.PlanetPopulation.arange[0].value, sim.PlanetPopulation.arange[1].value]#[min(x),max(x)]# of aPOP ylims = [min(y),ymax]#max(y)]# of RpPOp xmin = xlims[0] xmax = xlims[1] ymin = ylims[0] ymax = ylims[1] # Make the 'main' temperature plot # Define the number of bins nxbins = 50# a bins nybins = 50# Rp bins nbins = 100 xbins = np.logspace(start = np.log10(xmin), stop = np.log10(xmax), num = nxbins) ybins = np.logspace(start = np.log10(ymin), stop = np.log10(ymax), num = nybins) xcenter = (xbins[0:-1]+xbins[1:])/2.0 ycenter = (ybins[0:-1]+ybins[1:])/2.0 aspectratio = 1.0*(xmax - 0)/(1.0*ymax - 0) H, xedges,yedges = np.histogram2d(x,y,bins=(xbins,ybins),density=True)#normed=True) X = xcenter Y = ycenter Z = H #To calculate area under H # tmpx = np.diff(xedges) # tmpy = np.diff(yedges) # tmpmat = np.transpose(np.asarray(tmpx,ndmin))*tmpy # #test to be sure it is correct # tmpmat[1,2] == tmpx[1]*tmpy[2] #this should be true # np.sum(tmpmat*H) #this should equal1 # Plot the temperature data xcents = np.diff(xbins)/2.+xbins[:-1] ycents = np.diff(ybins)/2.+ybins[:-1] #Plots the contour lines for ax1 tmpH = H tmpH[H==0.] 
= np.nan cscaleMin = np.floor(np.nanmin(np.log10(tmpH))) # 10**min, min order of magnitude cscaleMax = np.ceil(np.nanmax(np.log10(tmpH))) # 10**max, max order of magnitude levels = 10.**np.arange(cscaleMin,cscaleMax+1) cax = ax1.contourf(xcents, ycents, H.T, extent=[xmin, xmax, ymin, ymax], cmap='jet', levels=levels, norm = LogNorm())#locator=ticker.LogLocator()) CS4 = ax1.contour(cax, colors=('k',), linewidths=(1,), origin='lower', levels=levels, norm = LogNorm())#locator=ticker.LogLocator()) #Add Colorbar cbar = fig2.colorbar(cax, cax=axCBAR, orientation='horizontal')#pad=0.05, plt.rcParams['axes.titlepad']=-10 axCBAR.set_xlabel('Joint Probability Density: Universe (Left) Detected Planets (Right)', weight='bold', labelpad=-35) axCBAR.tick_params(axis='x',direction='in',labeltop=True,labelbottom=False)#'off' cbar.add_lines(CS4) HDET, xedgesDET, yedgesDET = np.histogram2d(det_smas,det_Rps,bins=(xbins,ybins),density=True)#,normed=True) caxDET = ax4.contourf(xcents,ycents,HDET.T, extent=[xmin, xmax, ymin, ymax], cmap='jet', levels=levels, norm = LogNorm())#locator=ticker.LogLocator()) CS42 = ax4.contour(caxDET, colors=('k',), linewidths=(1,), origin='lower', levels=levels, norm = LogNorm())#locator=ticker.LogLocator()) #Set axes scales to log ax1.set_xscale('log') ax1.set_yscale('log') ax2.set_xscale('log') ax3.set_yscale('log') ax4.set_xscale('log') ax4.set_yscale('log') ax5.set_xscale('log') ax6.set_yscale('log') #Plot the axes labels ax1.set_xlabel('Universe Pop.\nSemi-Major Axis, $a$, in ($AU$)',weight='bold', multialignment='center') ax1.set_ylabel('Planet Radius $R_{p}$, in ($R_{\oplus}$)',weight='bold', multialignment='center') ax4.set_xlabel('Detected Planet Pop.\nSemi-Major Axis, $a$, in ($AU$)',weight='bold', multialignment='center') #Set up the histogram bins xbins = np.logspace(start = np.log10(xmin), stop = np.log10(xmax), num = nxbins) ybins = np.logspace(start = np.log10(ymin), stop = np.log10(ymax), num = nybins) #Plot the universe planet pop histograms #*note len(out) should equal len(all_res) #Universe SMA Hist n2, bins2, patches2 = plt.subplot(gs[4+5]).hist(x, bins=xbins, color = 'black', alpha=0., histtype='step',density=True)#,normed=True)#,density=True)#1D histogram of universe a center2 = (bins2[:-1] + bins2[1:]) / 2 width2=np.diff(bins2) ax2.bar(center2, n2*(len(x)/float(len(out['smas']))), align='center', width=width2, color='black', fill='black') #Detected SMA Hist n5, bins5, patches5 = plt.subplot(gs[4+5]).hist(det_smas, bins=xbins, color = 'black', alpha=0., histtype='step',density=True)#,normed=True)#,density=True)#1D histogram of detected planet a center5 = (bins5[:-1] + bins5[1:]) / 2 width5=np.diff(bins5) ax5.bar(center5, n5*(len(det_smas)/float(len(out['smas']))), align='center', width=width5, color='black', fill='black') #Universe Rp Hist n3, bins3, patches3 = plt.subplot(gs[4+5]).hist(y, bins=ybins, color = 'black', alpha=0., histtype='step',density=True)#,normed=True)#,density=True)#1D histogram of detected planet a center3 = (bins3[:-1] + bins3[1:]) / 2 width3=np.diff(bins3) ax3.barh(center3, n3*(len(y)/float(len(out['Rps']))), width3, align='center', color='black') #aDetected Rp Hist n6, bins6, patches6 = plt.subplot(gs[4+5]).hist(det_Rps, bins=ybins, color = 'black', alpha=0., histtype='step',density=True)#,normed=True)#,density=True)#1D histogram of detected planet a center6 = (bins6[:-1] + bins6[1:]) / 2 width6=np.diff(bins6) ax6.barh(center6, n6*(len(det_Rps)/float(len(out['Rps']))), width6, align='center', color='black') #Label Histograms 
ax2.set_ylabel(r'$\frac{{a\ Freq.}}{{{}\ sims}}$'.format(len(out['Rps'])),weight='bold', multialignment='center') ax3.set_xlabel(r'$\frac{{R_P\ Freq.}}{{{}\ sims}}$'.format(len(out['Rps'])),weight='bold', multialignment='center') ax6.set_xlabel(r'$\frac{{R_P\ Freq.}}{{{}\ sims}}$'.format(len(out['Rps'])),weight='bold', multialignment='center') #Set plot limits ax1.set_xlim(xlims) ax1.set_ylim(ylims) ax2.set_xlim(xlims) ax3.set_ylim(ylims) ax4.set_xlim(xlims) ax4.set_ylim(ylims) ax5.set_xlim(xlims) ax6.set_ylim(ylims) #Remove xticks on x-histogram and remove yticks on y-histogram ax2.set_xticks([]) ax3.set_yticks([]) ax4.set_yticks([]) ax5.set_xticks([]) ax6.set_yticks([]) # Remove the inner axes numbers of the histograms nullfmt = NullFormatter() ax2.xaxis.set_major_formatter(nullfmt) ax3.yaxis.set_major_formatter(nullfmt) ax4.yaxis.set_major_formatter(nullfmt) ax5.xaxis.set_major_formatter(nullfmt) ax6.yaxis.set_major_formatter(nullfmt) axCBAR.yaxis.set_major_formatter(nullfmt) fig2.subplots_adjust(bottom=0.15, top=0.75) TXT1.text(0.0, 0.15, 'Num.\nUniverse\nPlanets:\n%s'%("{:,}".format(len(x))), weight='bold', horizontalalignment='left', fontsize=6) TXT4.text(0.0, 0.15, 'Num.\nDetected\nPlanets:\n%s'%("{:,}".format(len(det_Rps))), weight='bold', horizontalalignment='left', fontsize=6) TXT1.axis('off') TXT4.axis('off') TXT1.xaxis.set_visible(False) TXT1.yaxis.set_visible(False) TXT4.xaxis.set_visible(False) TXT4.yaxis.set_visible(False) # Save to a File date = str(datetime.datetime.now()) date = ''.join(c + '_' for c in re.split('-|:| ',date)[0:-1])#Removes seconds from date fname = 'RpvsSMAdetections_' + folder.split('/')[-1] + '_' + date plt.savefig(os.path.join(PPoutpath, fname + '.png'), format='png', dpi=500) plt.savefig(os.path.join(PPoutpath, fname + '.svg')) plt.savefig(os.path.join(PPoutpath, fname + '.eps'), format='eps', dpi=500) plt.savefig(os.path.join(PPoutpath, fname + '.pdf'), format='pdf', dpi=500) #### Apply Grid to Detected Planet Pop #create coarse grid and calculate total numbers in each bin acoarse1 = np.logspace(np.log10(xlims[0]),np.log10(xlims[1]),6) Rcoarse1 = np.logspace(np.log10(ylims[0]),np.log10(ylims[1]),6) #Calculate 2d Histogram for input bins hcoarse1 = np.histogram2d(np.hstack(det_smas), np.hstack(det_Rps),bins=[acoarse1,Rcoarse1],normed=False)[0] #Plot Vertical and Horizontal Lines for R in Rcoarse1: ax4.plot(xlims,[R]*2,'k--') for a in acoarse1: ax4.plot([a]*2,ylims,'k--') accents1 = np.sqrt(acoarse1[:-1]*acoarse1[1:])#SMA centers for text Rccents1 = np.sqrt(Rcoarse1[:-1]*Rcoarse1[1:])#Rp centers for text #Plot Text for i in np.arange(len(Rccents1)): for j in range(len(accents1)): tmp1 = ax4.text(accents1[j],Rccents1[i],u'%2.2f'%(hcoarse1[j,i]/len(out['smas'])),horizontalalignment='center',verticalalignment='center',weight='bold', color='white', fontsize=8) tmp1.set_path_effects([PathEffects.withStroke(linewidth=2, foreground='k')]) #### Apply Grid to Universe Planet Population acoarse2 = np.logspace(np.log10(xlims[0]),np.log10(xlims[1]),6) Rcoarse2 = np.logspace(np.log10(ylims[0]),np.log10(ylims[1]),6) #Calculate 2d Histogram for input bins hcoarse2 = np.histogram2d(np.hstack(x), np.hstack(y),bins=[acoarse2,Rcoarse2],normed=False)[0] #Plot Vertical and Horizontal Lines for R in Rcoarse2: ax1.loglog(xlims,[R]*2,'k--') for a in acoarse2: ax1.loglog([a]*2,ylims,'k--') accents2 = np.sqrt(acoarse2[:-1]*acoarse2[1:])#SMA centers for text Rccents2 = np.sqrt(Rcoarse2[:-1]*Rcoarse2[1:])#Rp centers for text #Plot Text for i in np.arange(len(Rccents2)): for j in 
np.arange(len(accents2)): tmp2 = ax1.text(accents2[j],Rccents2[i],u'%2.2f'%(hcoarse2[j,i]/len(out['smas'])),horizontalalignment='center',verticalalignment='center',weight='bold', color='white', fontsize=8) tmp2.set_path_effects([PathEffects.withStroke(linewidth=2, foreground='k')]) # Save to a File fname = 'RpvsSMAdetectionsGridOverlay_' + folder.split('/')[-1] + '_' + date plt.savefig(os.path.join(PPoutpath, fname + '.png'), format='png', dpi=500, bbox_inches='tight') plt.savefig(os.path.join(PPoutpath, fname + '.svg'), bbox_inches='tight') plt.savefig(os.path.join(PPoutpath, fname + '.eps'), format='eps', dpi=500, bbox_inches='tight') plt.savefig(os.path.join(PPoutpath, fname + '.pdf'), format='pdf', dpi=500, bbox_inches='tight') plt.show(block=False) ###### Write Data File On This Plot lines = [] lines.append("Number of Simulations (Count): " + str(len(out['Rps']))) # Universe Plot Limits lines.append('Universe plot xmin (AU): ' + str(xlims[0]) + '\n') lines.append('Universe plot xmax (AU): ' + str(xlims[1]) + '\n') lines.append('Universe plot ymin (Re): ' + str(ylims[0]) + '\n') lines.append('Universe plot ymax (Re): ' + str(ylims[1]) + '\n') lines.append('Universe plot xmin (AU): ' + str(xlims[0]) + '\n') lines.append('Universe plot xmax (AU): ' + str(xlims[1]) + '\n') lines.append('Universe plot ymin (Re): ' + str(ylims[0]) + '\n') lines.append('Universe plot ymax (Re): ' + str(ylims[1]) + '\n') lines.append('Universe plot max text grid val (Count/Sim): ' + str(np.amax(hcoarse1)) + '\n') lines.append('Universe plot Total Detections (Count): ' + str(len(x)) + '\n') lines.append('Universe plot Total Detections per Sim (Count/Sim): ' + str(len(x)/len(out['Rps'])) + '\n') lines.append('Universe Grid Data: xmin_grid (AU), xmax_grid (AU), ymin_grid (Re), ymax_grid (Re), grid_value (Count/Sim)\n') maxBinij = [0,0] maxBinVal = 0. for i in np.arange(len(Rccents2)): for j in np.arange(len(accents2)): lines.append(", ".join([str(acoarse2[i]),str(acoarse2[i+1]),str(Rcoarse2[j]),str(Rcoarse2[j+1]),str(hcoarse2[j,i]/len(out['smas']))]) + '\n') if hcoarse2[j,i]/len(out['smas']) > maxBinVal: maxBinVal = hcoarse2[j,i]/len(out['smas']) maxBinij = [i,j] lines.append('Universe plot Maximum Grid Value (Count/Sim): ' + str(maxBinVal)) lines.append('Universe plot Maximum Grid i,j (index, index): ' + str(maxBinij[0]) + ', ' + str(maxBinij[1])) # Detected Planet Population Limits lines.append('Detected plot xmin (AU): ' + str(xlims[0]) + '\n') lines.append('Detected plot xmax (AU): ' + str(xlims[1]) + '\n') lines.append('Detected plot ymin (Re): ' + str(ylims[0]) + '\n') lines.append('Detected plot ymax (Re): ' + str(ylims[1]) + '\n') lines.append('Detected plot xmin (AU): ' + str(xlims[0]) + '\n') lines.append('Detected plot xmax (AU): ' + str(xlims[1]) + '\n') lines.append('Detected plot ymin (Re): ' + str(ylims[0]) + '\n') lines.append('Detected plot ymax (Re): ' + str(ylims[1]) + '\n') lines.append('Detected plot max text grid val (Count/Sim): ' + str(np.amax(hcoarse2)) + '\n') lines.append('Detected plot Total Detections (Count): ' + str(len(det_Rps)) + '\n') lines.append('Detected plot Total Detections per Sim (Count/Sim): ' + str(len(x)/len(out['Rps'])) + '\n') lines.append('Detected Grid Data: xmin_grid (AU), xmax_grid (AU), ymin_grid (Re), ymax_grid (Re), grid_value (Count/Sim)\n') maxBinij = [0,0] maxBinVal = 0. 
for i in np.arange(len(Rccents1)): for j in np.arange(len(accents1)): lines.append(", ".join([str(acoarse1[j]),str(acoarse1[j+1]),str(Rcoarse1[i]),str(Rcoarse1[i+1]),str(hcoarse1[j,i]/len(out['smas']))]) + '\n') if hcoarse1[j,i]/len(out['smas']) > maxBinVal: maxBinVal = hcoarse1[j,i]/len(out['smas']) maxBinij = [i,j] lines.append('Detected plot Maximum Grid Value (Count/Sim): ' + str(maxBinVal)) lines.append('Detected plot Maximum Grid i,j (index, index): ' + str(maxBinij[0]) + ', ' + str(maxBinij[1])) #### Save Data File fname = 'RpvsSMAdetectionsDATA_' + folder.split('/')[-1] + '_' + date with open(os.path.join(PPoutpath, fname + '.txt'), 'w') as g: g.write("\n".join(lines)) #### Plot self.plotEccenHist(PPoutpath, folder, fname, date, z, det_eccens) del out del allres def extractXY(self, out, allres): """ Simply pulls out the Rp, SMA, and e data for each star in the pkl file Args: out (dict): summary dictionary of mission results (contains 'Rps', 'smas', etc.) allres (list): list of results loaded from the pkl files Returns: SMA (numpy array) - SMA of all Stars Rp (numpy array) - Rp of all Stars E (numpy array) - E of all Stars """ Rpunits = allres[0]['systems']['Rp'].unit allres_Rp = np.concatenate([allres[i]['systems']['Rp'].value for i in range(len(allres))]) smaunits = allres[0]['systems']['a'].unit allres_sma = np.concatenate([allres[i]['systems']['a'].value for i in range(len(allres))]) allres_E = np.concatenate([allres[i]['systems']['e'] for i in range(len(allres))]) SMA = allres_sma Rp = allres_Rp E = allres_E return SMA, Rp, E def plotEccenHist(self, PPoutpath, folder, fname, date, uni_eccens, det_eccens): """ Plots the input population eccentricity histogram and detected planet eccentricity distribution """ #### Calculate universe planet pop eccen CDF uni_bins = np.linspace(start=0., stop=1., num=1001, endpoint=True) plt.close(68132188463517733654) figH = plt.figure(68132188463517733654) uni_n, uni_bins, uni_patches = plt.hist(uni_eccens, bins=uni_bins, alpha=0.3, color='red', label='Universe') plt.xlabel('Planet Eccentricity, ' + r"$e$", weight='bold') plt.ylabel('Frequency (count)', weight='bold') uni_cdf = np.cumsum(uni_n)#cumtrapz(n, bins[:-1], initial=0.) uni_cdf_norm = uni_cdf/np.max(uni_cdf) #### Calculate detected planet pop eccen CDF det_n, det_bins, det_patches = plt.hist(det_eccens, bins=uni_bins, alpha=0.3, color='blue', label='Detected') det_cdf = np.cumsum(det_n)#cumtrapz(n, bins[:-1], initial=0.) 
det_cdf_norm = det_cdf/np.max(det_cdf) plt.xlim([0.,1.]) plt.legend() plt.show(block=False) #not saving plt.close(63548643515) figI = plt.figure(63548643515) plt.plot(uni_bins[:-1],uni_cdf_norm*100.,color='red',linestyle='--', label='Universe CDF') plt.plot(uni_bins[:-1],det_cdf_norm*100.,color='blue',linestyle='--', label='Detected CDF') plt.ylabel('Percent (%)', weight='bold') plt.xlabel('Planet Eccentricity, ' + r"$e$", weight='bold') plt.xlim([0.,1.]) plt.legend() plt.show(block=False) #not saving plt.close(278989879863) figJ = plt.figure(278989879863) plt.plot(uni_bins[:-1],det_n/uni_n,color='k',alpha=0.5, label='Count ratio') plt.plot(uni_bins[:-1],det_cdf/uni_cdf,color='purple', alpha=0.5, label='CDF ratio') plt.ylabel('Fraction of Planets Detected in Universe', weight='bold') plt.xlabel('Planet Eccentricity, ' + r"$e$", weight='bold') plt.ylim([0.,1.]) plt.xlim([0.,1.]) plt.legend() plt.show(block=False) fname = 'kop_EccenHistFractionDetected_1' + folder.split('/')[-1] + '_' + date plt.savefig(os.path.join(PPoutpath, fname + '.png'), format='png', dpi=500, bbox_inches='tight') plt.savefig(os.path.join(PPoutpath, fname + '.svg'), bbox_inches='tight') plt.savefig(os.path.join(PPoutpath, fname + '.pdf'), format='pdf', dpi=500, bbox_inches='tight') plt.ylim([0.,1.1*np.nanmax(det_n/uni_n)]) fname = 'kop_EccenHistFractionDetected_2' + folder.split('/')[-1] + '_' + date plt.savefig(os.path.join(PPoutpath, fname + '.png'), format='png', dpi=500, bbox_inches='tight') plt.savefig(os.path.join(PPoutpath, fname + '.svg'), bbox_inches='tight') plt.savefig(os.path.join(PPoutpath, fname + '.pdf'), format='pdf', dpi=500, bbox_inches='tight') plt.show(block=False) #### Plot histogram of detected plt.close(231635) fig_eccenHist = plt.figure(231635) plt.rc('axes',linewidth=2) plt.rc('lines',linewidth=2) plt.rcParams['axes.linewidth']=2 plt.rc('font',weight='bold') ax2 = fig_eccenHist.add_subplot(1,1,1) uni_bins_coarse = np.linspace(start=0., stop=1., num=21, endpoint=True) uni_n2, uni_bins2 = np.histogram(uni_eccens, bins=uni_bins_coarse) det_n2, det_bins2 = np.histogram(det_eccens, bins=uni_bins_coarse) xcents_uni = (uni_bins_coarse[:-1]+uni_bins_coarse[1:])/2. width = np.diff(uni_bins_coarse) ax2.bar(xcents_uni, uni_n2, width=width, zorder=8,color='red',alpha=0.3, label='Universe: '+str(int(np.sum(uni_n)))) ax2.bar(xcents_uni, det_n2, width=width, zorder=8,color='blue',alpha=0.3, label='Detected: '+str(int(np.sum(det_n)))) ax2.set_xlabel('Orbital Eccentricity', weight='bold') ax3 = ax2.twinx() ax3.plot(uni_bins[:-1],uni_cdf_norm*100.,color='red',linestyle='--', label='Universe CDF') ax3.plot(uni_bins[:-1],det_cdf_norm*100.,color='blue',linestyle='--', label='Detected CDF') ax2.set_ylabel('Occurrence Frequency (counts)', weight='bold') ax3.set_ylabel('Eccentricity CDF (%)', weight='bold') ax2.set_xlim(left=0.,right=1.) #ax2.set_ylim(bottom=1e-1,top=100.) #np.sum(uni_n)) ax3.set_ylim(bottom=0.,top=100.) ax2.legend(loc='upper right') ax3.legend(loc='lower right') ax2.ticklabel_format(style='sci', axis='y',scilimits=(0,5)) plt.show(block=False) fname = 'kop_DetectedEccenHist' + folder.split('/')[-1] + '_' + date plt.savefig(os.path.join(PPoutpath, fname + '.png'), format='png', dpi=500, bbox_inches='tight') plt.savefig(os.path.join(PPoutpath, fname + '.svg'), bbox_inches='tight') plt.savefig(os.path.join(PPoutpath, fname + '.pdf'), format='pdf', dpi=500, bbox_inches='tight')
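# --- Illustrative sketch (not part of the original module) ------------------
# Standalone illustration of the histogram -> normalized-CDF comparison that
# plotEccenHist performs above. The Beta-distributed eccentricity samples and
# sample sizes below are synthetic placeholders, not EXOSIMS simulation output.
import numpy as np

rng = np.random.default_rng(0)
uni_ecc_demo = rng.beta(0.867, 3.03, size=100000)  # assumed "universe" population
det_ecc_demo = rng.beta(0.867, 3.03, size=2000)    # assumed "detected" subsample

bins_demo = np.linspace(0., 1., 1001)
uni_counts, _ = np.histogram(uni_ecc_demo, bins=bins_demo)
det_counts, _ = np.histogram(det_ecc_demo, bins=bins_demo)

# Same normalization idea as plotEccenHist: cumulative counts scaled to end at 1
uni_cdf_demo = np.cumsum(uni_counts) / np.sum(uni_counts)
det_cdf_demo = np.cumsum(det_counts) / np.sum(det_counts)
print(uni_cdf_demo[-1], det_cdf_demo[-1])  # both 1.0 by construction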
bsd-3-clause
zihua/scikit-learn
examples/mixture/plot_gmm_pdf.py
140
1521
""" ========================================= Density Estimation for a Gaussian mixture ========================================= Plot the density estimation of a mixture of two Gaussians. Data is generated from two Gaussians with different centers and covariance matrices. """ import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm from sklearn import mixture n_samples = 300 # generate random sample, two components np.random.seed(0) # generate spherical data centered on (20, 20) shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20]) # generate zero centered stretched Gaussian data C = np.array([[0., -0.7], [3.5, .7]]) stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C) # concatenate the two datasets into the final training set X_train = np.vstack([shifted_gaussian, stretched_gaussian]) # fit a Gaussian Mixture Model with two components clf = mixture.GaussianMixture(n_components=2, covariance_type='full') clf.fit(X_train) # display predicted scores by the model as a contour plot x = np.linspace(-20., 30.) y = np.linspace(-20., 40.) X, Y = np.meshgrid(x, y) XX = np.array([X.ravel(), Y.ravel()]).T Z = -clf.score_samples(XX) Z = Z.reshape(X.shape) CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0), levels=np.logspace(0, 3, 10)) CB = plt.colorbar(CS, shrink=0.8, extend='both') plt.scatter(X_train[:, 0], X_train[:, 1], .8) plt.title('Negative log-likelihood predicted by a GMM') plt.axis('tight') plt.show()
bsd-3-clause
nrhine1/scikit-learn
examples/hetero_feature_union.py
288
6236
""" ============================================= Feature Union with Heterogeneous Data Sources ============================================= Datasets can often contain components of that require different feature extraction and processing pipelines. This scenario might occur when: 1. Your dataset consists of heterogeneous data types (e.g. raster images and text captions) 2. Your dataset is stored in a Pandas DataFrame and different columns require different processing pipelines. This example demonstrates how to use :class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing different types of features. We use the 20-newsgroups dataset and compute standard bag-of-words features for the subject line and body in separate pipelines as well as ad hoc features on the body. We combine them (with weights) using a FeatureUnion and finally train a classifier on the combined set of features. The choice of features is not particularly helpful, but serves to illustrate the technique. """ # Author: Matt Terry <[email protected]> # # License: BSD 3 clause from __future__ import print_function import numpy as np from sklearn.base import BaseEstimator, TransformerMixin from sklearn.datasets import fetch_20newsgroups from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics import classification_report from sklearn.pipeline import FeatureUnion from sklearn.pipeline import Pipeline from sklearn.svm import SVC class ItemSelector(BaseEstimator, TransformerMixin): """For data grouped by feature, select subset of data at a provided key. The data is expected to be stored in a 2D data structure, where the first index is over features and the second is over samples. i.e. >> len(data[key]) == n_samples Please note that this is the opposite convention to sklearn feature matrixes (where the first index corresponds to sample). ItemSelector only requires that the collection implement getitem (data[key]). Examples include: a dict of lists, 2D numpy array, Pandas DataFrame, numpy record array, etc. >> data = {'a': [1, 5, 2, 5, 2, 8], 'b': [9, 4, 1, 4, 1, 3]} >> ds = ItemSelector(key='a') >> data['a'] == ds.transform(data) ItemSelector is not designed to handle data grouped by sample. (e.g. a list of dicts). If your data is structured this way, consider a transformer along the lines of `sklearn.feature_extraction.DictVectorizer`. Parameters ---------- key : hashable, required The key corresponding to the desired value in a mappable. """ def __init__(self, key): self.key = key def fit(self, x, y=None): return self def transform(self, data_dict): return data_dict[self.key] class TextStats(BaseEstimator, TransformerMixin): """Extract features from each document for DictVectorizer""" def fit(self, x, y=None): return self def transform(self, posts): return [{'length': len(text), 'num_sentences': text.count('.')} for text in posts] class SubjectBodyExtractor(BaseEstimator, TransformerMixin): """Extract the subject & body from a usenet post in a single pass. Takes a sequence of strings and produces a dict of sequences. Keys are `subject` and `body`. 
""" def fit(self, x, y=None): return self def transform(self, posts): features = np.recarray(shape=(len(posts),), dtype=[('subject', object), ('body', object)]) for i, text in enumerate(posts): headers, _, bod = text.partition('\n\n') bod = strip_newsgroup_footer(bod) bod = strip_newsgroup_quoting(bod) features['body'][i] = bod prefix = 'Subject:' sub = '' for line in headers.split('\n'): if line.startswith(prefix): sub = line[len(prefix):] break features['subject'][i] = sub return features pipeline = Pipeline([ # Extract the subject & body ('subjectbody', SubjectBodyExtractor()), # Use FeatureUnion to combine the features from subject and body ('union', FeatureUnion( transformer_list=[ # Pipeline for pulling features from the post's subject line ('subject', Pipeline([ ('selector', ItemSelector(key='subject')), ('tfidf', TfidfVectorizer(min_df=50)), ])), # Pipeline for standard bag-of-words model for body ('body_bow', Pipeline([ ('selector', ItemSelector(key='body')), ('tfidf', TfidfVectorizer()), ('best', TruncatedSVD(n_components=50)), ])), # Pipeline for pulling ad hoc features from post's body ('body_stats', Pipeline([ ('selector', ItemSelector(key='body')), ('stats', TextStats()), # returns a list of dicts ('vect', DictVectorizer()), # list of dicts -> feature matrix ])), ], # weight components in FeatureUnion transformer_weights={ 'subject': 0.8, 'body_bow': 0.5, 'body_stats': 1.0, }, )), # Use a SVC classifier on the combined features ('svc', SVC(kernel='linear')), ]) # limit the list of categories to make running this exmaple faster. categories = ['alt.atheism', 'talk.religion.misc'] train = fetch_20newsgroups(random_state=1, subset='train', categories=categories, ) test = fetch_20newsgroups(random_state=1, subset='test', categories=categories, ) pipeline.fit(train.data, train.target) y = pipeline.predict(test.data) print(classification_report(y, test.target))
bsd-3-clause
andrewgross/json2parquet
json2parquet/client.py
1
6558
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime import json import ciso8601 import pyarrow as pa import pyarrow.parquet as pq import pandas as pd epoch = datetime.datetime.utcfromtimestamp(0) def ingest_data(data, schema=None, date_format=None, field_aliases=None): """ data: Array of Dictionary objects schema: PyArrow schema object or list of column names date_format: Pandas datetime format string (with schema only) field_aliases: dict mapping Json field names to desired schema names return: a PyArrow Batch """ if isinstance(schema, list) and isinstance(field_aliases, dict): return _convert_data_with_column_names_dict(data, field_aliases) elif isinstance(schema, dict): return _convert_data_with_column_names_dict(data, schema) elif isinstance(schema, list): return _convert_data_with_column_names(data, schema) elif isinstance(schema, pa.Schema): return _convert_data_with_schema(data, schema, date_format=date_format, field_aliases=field_aliases) else: return _convert_data_without_schema(data) def _convert_data_without_schema(data): # Prepare for something ugly. # Iterate over all of the data to find all of our column names # Then parse the data as if we were given column names column_names = set() for row in data: names = set(row.keys()) column_names = column_names.union(names) column_names = sorted(list(column_names)) return _convert_data_with_column_names(data, column_names) def _convert_data_with_column_names_dict(data, schema): column_data = {} array_data = [] schema_names = [] for row in data: for column in schema: _col = column_data.get(column, []) _col.append(row.get(column)) column_data[column] = _col for column in schema.keys(): _col = column_data.get(column) array_data.append(pa.array(_col)) # Use custom column names given by user schema_names.append(schema[column]) return pa.RecordBatch.from_arrays(array_data, schema_names) def _convert_data_with_column_names(data, schema): column_data = {} array_data = [] for row in data: for column in schema: _col = column_data.get(column, []) _col.append(row.get(column)) column_data[column] = _col for column in schema: _col = column_data.get(column) array_data.append(pa.array(_col)) return pa.RecordBatch.from_arrays(array_data, schema) def _convert_data_with_schema(data, schema, date_format=None, field_aliases=None): column_data = {} array_data = [] schema_names = [] for row in data: for column in schema.names: _col = column_data.get(column, []) _col.append(row.get(column)) column_data[column] = _col for column in schema: _col = column_data.get(column.name) if isinstance(column.type, pa.lib.TimestampType): _converted_col = [] for t in _col: try: _converted_col.append(pd.to_datetime(t, format=date_format)) except pd._libs.tslib.OutOfBoundsDatetime: _converted_col.append(pd.Timestamp.max) array_data.append(pa.Array.from_pandas(pd.to_datetime(_converted_col), type=pa.timestamp('ns'))) elif column.type.id == pa.date32().id: _converted_col = map(_date_converter, _col) array_data.append(pa.array(_converted_col, type=pa.date32())) # Float types are ambiguous for conversions, need to specify the exact type elif column.type.id == pa.float64().id: array_data.append(pa.array(_col, type=pa.float64())) elif column.type.id == pa.float32().id: # Python doesn't have a native float32 type # and PyArrow cannot cast float64 -> float32 _col = pd.to_numeric(_col, downcast='float') array_data.append(pa.Array.from_pandas(_col, type=pa.float32())) elif column.type.id == pa.int32().id: # PyArrow 0.8.0 can cast int64 -> int32 _col64 = 
pa.array(_col, type=pa.int64()) array_data.append(_col64.cast(pa.int32())) elif column.type.id == pa.bool_().id: _col = map(_boolean_converter, _col) array_data.append(pa.array(_col, type=column.type)) else: array_data.append(pa.array(_col, type=column.type)) if isinstance(field_aliases, dict): schema_names.append(field_aliases.get(column.name, column.name)) else: schema_names.append(column.name) return pa.RecordBatch.from_arrays(array_data, schema_names) def _boolean_converter(val): if val is None: return val return bool(val) def _date_converter(date_str): dt = ciso8601.parse_datetime(date_str) return (dt - epoch).days def load_json(filename, schema=None, date_format=None, field_aliases=None): """ Simple but inefficient way to load data from a newline delineated json file """ json_data = [] with open(filename, "r") as f: for line in f.readlines(): if line: json_data.append(json.loads(line)) return ingest_data(json_data, schema=schema, date_format=date_format, field_aliases=field_aliases) def write_parquet(data, destination, **kwargs): """ data: PyArrow record batch destination: Output file name **kwargs: defined at https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html """ try: table = pa.Table.from_batches(data) except TypeError: table = pa.Table.from_batches([data]) pq.write_table(table, destination, **kwargs) def write_parquet_dataset(data, destination, **kwargs): """ data: PyArrow record batch destination: Output directory **kwargs: defined at https://arrow.apache.org/docs/python/generated/pyarrow.parquet.write_table.html This adds support for writing with partitions, compared with 'write_table'. """ try: table = pa.Table.from_batches(data) except TypeError: table = pa.Table.from_batches([data]) pq.write_to_dataset(table, destination, **kwargs) def convert_json(input, output, schema=None, date_format=None, field_aliases=None, **kwargs): data = load_json(input, schema=schema, date_format=date_format, field_aliases=field_aliases) write_parquet(data, output, **kwargs)
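# --- Illustrative sketch (not part of the original module) ------------------
# Minimal end-to-end use of the helpers defined above: a list of JSON-like
# records -> Arrow RecordBatch -> Parquet file. The record contents and the
# output file name are assumptions for illustration only.
if __name__ == '__main__':
    demo_rows = [
        {'name': 'alice', 'score': 1.5},
        {'name': 'bob', 'score': 2.0},
    ]
    demo_batch = ingest_data(demo_rows, schema=['name', 'score'])
    write_parquet(demo_batch, 'demo_output.parquet')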
mit
ZenDevelopmentSystems/scikit-learn
sklearn/utils/fixes.py
39
13318
"""Compatibility fixes for older version of python, numpy and scipy If you add content to this file, please give the version of the package at which the fixe is no longer needed. """ # Authors: Emmanuelle Gouillart <[email protected]> # Gael Varoquaux <[email protected]> # Fabian Pedregosa <[email protected]> # Lars Buitinck # # License: BSD 3 clause import warnings import sys import functools import os import errno import numpy as np import scipy.sparse as sp import scipy try: from inspect import signature except ImportError: from ..externals.funcsigs import signature def _parse_version(version_string): version = [] for x in version_string.split('.'): try: version.append(int(x)) except ValueError: # x may be of the form dev-1ea1592 version.append(x) return tuple(version) np_version = _parse_version(np.__version__) sp_version = _parse_version(scipy.__version__) try: from scipy.special import expit # SciPy >= 0.10 with np.errstate(invalid='ignore', over='ignore'): if np.isnan(expit(1000)): # SciPy < 0.14 raise ImportError("no stable expit in scipy.special") except ImportError: def expit(x, out=None): """Logistic sigmoid function, ``1 / (1 + exp(-x))``. See sklearn.utils.extmath.log_logistic for the log of this function. """ if out is None: out = np.empty(np.atleast_1d(x).shape, dtype=np.float64) out[:] = x # 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2 # This way of computing the logistic is both fast and stable. out *= .5 np.tanh(out, out) out += 1 out *= .5 return out.reshape(np.shape(x)) # little danse to see if np.copy has an 'order' keyword argument if 'order' in signature(np.copy).parameters: def safe_copy(X): # Copy, but keep the order return np.copy(X, order='K') else: # Before an 'order' argument was introduced, numpy wouldn't muck with # the ordering safe_copy = np.copy try: if (not np.allclose(np.divide(.4, 1, casting="unsafe"), np.divide(.4, 1, casting="unsafe", dtype=np.float)) or not np.allclose(np.divide(.4, 1), .4)): raise TypeError('Divide not working with dtype: ' 'https://github.com/numpy/numpy/issues/3484') divide = np.divide except TypeError: # Compat for old versions of np.divide that do not provide support for # the dtype args def divide(x1, x2, out=None, dtype=None): out_orig = out if out is None: out = np.asarray(x1, dtype=dtype) if out is x1: out = x1.copy() else: if out is not x1: out[:] = x1 if dtype is not None and out.dtype != dtype: out = out.astype(dtype) out /= x2 if out_orig is None and np.isscalar(x1): out = np.asscalar(out) return out try: np.array(5).astype(float, copy=False) except TypeError: # Compat where astype accepted no copy argument def astype(array, dtype, copy=True): if not copy and array.dtype == dtype: return array return array.astype(dtype) else: astype = np.ndarray.astype try: with warnings.catch_warnings(record=True): # Don't raise the numpy deprecation warnings that appear in # 1.9, but avoid Python bug due to simplefilter('ignore') warnings.simplefilter('always') sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0) except (TypeError, AttributeError): # in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument # the following code is taken from the scipy 0.14 codebase def _minor_reduce(X, ufunc): major_index = np.flatnonzero(np.diff(X.indptr)) if X.data.size == 0 and major_index.size == 0: # Numpy < 1.8.0 don't handle empty arrays in reduceat value = np.zeros_like(X.data) else: value = ufunc.reduceat(X.data, X.indptr[major_index]) return major_index, value def _min_or_max_axis(X, axis, min_or_max): N = X.shape[axis] if N == 0: raise 
ValueError("zero-size array to reduction operation") M = X.shape[1 - axis] mat = X.tocsc() if axis == 0 else X.tocsr() mat.sum_duplicates() major_index, value = _minor_reduce(mat, min_or_max) not_full = np.diff(mat.indptr)[major_index] < N value[not_full] = min_or_max(value[not_full], 0) mask = value != 0 major_index = np.compress(mask, major_index) value = np.compress(mask, value) from scipy.sparse import coo_matrix if axis == 0: res = coo_matrix((value, (np.zeros(len(value)), major_index)), dtype=X.dtype, shape=(1, M)) else: res = coo_matrix((value, (major_index, np.zeros(len(value)))), dtype=X.dtype, shape=(M, 1)) return res.A.ravel() def _sparse_min_or_max(X, axis, min_or_max): if axis is None: if 0 in X.shape: raise ValueError("zero-size array to reduction operation") zero = X.dtype.type(0) if X.nnz == 0: return zero m = min_or_max.reduce(X.data.ravel()) if X.nnz != np.product(X.shape): m = min_or_max(zero, m) return m if axis < 0: axis += 2 if (axis == 0) or (axis == 1): return _min_or_max_axis(X, axis, min_or_max) else: raise ValueError("invalid axis, use 0 for rows, or 1 for columns") def sparse_min_max(X, axis): return (_sparse_min_or_max(X, axis, np.minimum), _sparse_min_or_max(X, axis, np.maximum)) else: def sparse_min_max(X, axis): return (X.min(axis=axis).toarray().ravel(), X.max(axis=axis).toarray().ravel()) try: from numpy import argpartition except ImportError: # numpy.argpartition was introduced in v 1.8.0 def argpartition(a, kth, axis=-1, kind='introselect', order=None): return np.argsort(a, axis=axis, order=order) try: from itertools import combinations_with_replacement except ImportError: # Backport of itertools.combinations_with_replacement for Python 2.6, # from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright # Python Software Foundation (https://docs.python.org/3/license.html) def combinations_with_replacement(iterable, r): # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC pool = tuple(iterable) n = len(pool) if not n and r: return indices = [0] * r yield tuple(pool[i] for i in indices) while True: for i in reversed(range(r)): if indices[i] != n - 1: break else: return indices[i:] = [indices[i] + 1] * (r - i) yield tuple(pool[i] for i in indices) try: from numpy import isclose except ImportError: def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): """ Returns a boolean array where two arrays are element-wise equal within a tolerance. This function was added to numpy v1.7.0, and the version you are running has been backported from numpy v1.8.1. See its documentation for more details. """ def within_tol(x, y, atol, rtol): with np.errstate(invalid='ignore'): result = np.less_equal(abs(x - y), atol + rtol * abs(y)) if np.isscalar(a) and np.isscalar(b): result = bool(result) return result x = np.array(a, copy=False, subok=True, ndmin=1) y = np.array(b, copy=False, subok=True, ndmin=1) xfin = np.isfinite(x) yfin = np.isfinite(y) if all(xfin) and all(yfin): return within_tol(x, y, atol, rtol) else: finite = xfin & yfin cond = np.zeros_like(finite, subok=True) # Since we're using boolean indexing, x & y must be the same shape. # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in # lib.stride_tricks, though, so we can't import it here. x = x * np.ones_like(cond) y = y * np.ones_like(cond) # Avoid subtraction with infinite/nan values... cond[finite] = within_tol(x[finite], y[finite], atol, rtol) # Check for equality of infinite values... 
cond[~finite] = (x[~finite] == y[~finite]) if equal_nan: # Make NaN == NaN cond[np.isnan(x) & np.isnan(y)] = True return cond if np_version < (1, 7): # Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg. def frombuffer_empty(buf, dtype): if len(buf) == 0: return np.empty(0, dtype=dtype) else: return np.frombuffer(buf, dtype=dtype) else: frombuffer_empty = np.frombuffer if np_version < (1, 8): def in1d(ar1, ar2, assume_unique=False, invert=False): # Backport of numpy function in1d 1.8.1 to support numpy 1.6.2 # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # This code is significantly faster when the condition is satisfied. if len(ar2) < 10 * len(ar1) ** 0.145: if invert: mask = np.ones(len(ar1), dtype=np.bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=np.bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) indx = order.argsort(kind='mergesort')[:len(ar1)] if assume_unique: return flag[indx] else: return flag[indx][rev_idx] else: from numpy import in1d if sp_version < (0, 15): # Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142 from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr else: from scipy.sparse.linalg import lsqr as sparse_lsqr if sys.version_info < (2, 7, 0): # partial cannot be pickled in Python 2.6 # http://bugs.python.org/issue1398 class partial(object): def __init__(self, func, *args, **keywords): functools.update_wrapper(self, func) self.func = func self.args = args self.keywords = keywords def __call__(self, *args, **keywords): args = self.args + args kwargs = self.keywords.copy() kwargs.update(keywords) return self.func(*args, **kwargs) else: from functools import partial if np_version < (1, 6, 2): # Allow bincount to accept empty arrays # https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040 def bincount(x, weights=None, minlength=None): if len(x) > 0: return np.bincount(x, weights, minlength) else: if minlength is None: minlength = 0 minlength = np.asscalar(np.asarray(minlength, dtype=np.intp)) return np.zeros(minlength, dtype=np.intp) else: from numpy import bincount if 'exist_ok' in signature(os.makedirs).parameters: makedirs = os.makedirs else: def makedirs(name, mode=0o777, exist_ok=False): """makedirs(name [, mode=0o777][, exist_ok=False]) Super-mkdir; create a leaf directory and all intermediate ones. Works like mkdir, except that any intermediate path segment (not just the rightmost) will be created if it does not exist. If the target directory already exists, raise an OSError if exist_ok is False. Otherwise no exception is raised. This is recursive. 
""" try: os.makedirs(name, mode=mode) except OSError as e: if (not exist_ok or e.errno != errno.EEXIST or not os.path.isdir(name)): raise if np_version < (1, 8, 1): def array_equal(a1, a2): # copy-paste from numpy 1.8.1 try: a1, a2 = np.asarray(a1), np.asarray(a2) except: return False if a1.shape != a2.shape: return False return bool(np.asarray(a1 == a2).all()) else: from numpy import array_equal
bsd-3-clause
hsuantien/scikit-learn
sklearn/linear_model/coordinate_descent.py
42
73973
# Author: Alexandre Gramfort <[email protected]> # Fabian Pedregosa <[email protected]> # Olivier Grisel <[email protected]> # Gael Varoquaux <[email protected]> # # License: BSD 3 clause import sys import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy import sparse from .base import LinearModel, _pre_fit from ..base import RegressorMixin from .base import center_data, sparse_center_data from ..utils import check_array, check_X_y, deprecated from ..utils.validation import check_random_state from ..cross_validation import check_cv from ..externals.joblib import Parallel, delayed from ..externals import six from ..externals.six.moves import xrange from ..utils.extmath import safe_sparse_dot from ..utils.validation import check_is_fitted from ..utils import ConvergenceWarning from . import cd_fast ############################################################################### # Paths functions def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True, eps=1e-3, n_alphas=100, normalize=False, copy_X=True): """ Compute the grid of alpha values for elastic net parameter search Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication y : ndarray, shape (n_samples,) Target values Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. l1_ratio : float The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2. eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path fit_intercept : boolean, default True Whether to fit an intercept or not normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. """ n_samples = len(y) sparse_center = False if Xy is None: X_sparse = sparse.isspmatrix(X) sparse_center = X_sparse and (fit_intercept or normalize) X = check_array(X, 'csc', copy=(copy_X and fit_intercept and not X_sparse)) if not X_sparse: # X can be touched inplace thanks to the above line X, y, _, _, _ = center_data(X, y, fit_intercept, normalize, copy=False) Xy = safe_sparse_dot(X.T, y, dense_output=True) if sparse_center: # Workaround to find alpha_max for sparse matrices. # since we should not destroy the sparsity of such matrices. _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept, normalize) mean_dot = X_mean * np.sum(y) if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if sparse_center: if fit_intercept: Xy -= mean_dot[:, np.newaxis] if normalize: Xy /= X_std[:, np.newaxis] alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() / (n_samples * l1_ratio)) if alpha_max <= np.finfo(float).resolution: alphas = np.empty(n_alphas) alphas.fill(np.finfo(float).resolution) return alphas return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max), num=n_alphas)[::-1] def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, **params): """Compute Lasso path with coordinate descent The Lasso optimization function varies for mono and multi-outputs. 
For mono-output tasks it is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 For multi-output tasks it is:: (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <lasso>`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If ``y`` is mono-output then ``X`` can be sparse. y : ndarray, shape (n_samples,), or (n_samples, n_outputs) Target values eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path alphas : ndarray, optional List of alphas where to compute the models. If ``None`` alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. coef_init : array, shape (n_features, ) | None The initial values of the coefficients. verbose : bool or integer Amount of verbosity. params : kwargs keyword arguments passed to the coordinate descent solver. positive : bool, default False If set to True, forces coefficients to be positive. return_n_iter : bool whether to return the number of iterations or not. Returns ------- alphas : array, shape (n_alphas,) The alphas along the path where models are computed. coefs : array, shape (n_features, n_alphas) or \ (n_outputs, n_features, n_alphas) Coefficients along the path. dual_gaps : array, shape (n_alphas,) The dual gaps at the end of the optimization for each alpha. n_iters : array-like, shape (n_alphas,) The number of iterations taken by the coordinate descent optimizer to reach the specified tolerance for each alpha. Notes ----- See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. Note that in certain cases, the Lars solver may be significantly faster to implement this functionality. In particular, linear interpolation can be used to retrieve model coefficients between the values output by lars_path Examples --------- Comparing lasso_path and lars_path with interpolation: >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T >>> y = np.array([1, 2, 3.1]) >>> # Use lasso_path to compute a coefficient path >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5]) >>> print(coef_path) [[ 0. 0. 0.46874778] [ 0.2159048 0.4425765 0.23689075]] >>> # Now use lars_path and 1D linear interpolation to compute the >>> # same path >>> from sklearn.linear_model import lars_path >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso') >>> from scipy import interpolate >>> coef_path_continuous = interpolate.interp1d(alphas[::-1], ... coef_path_lars[:, ::-1]) >>> print(coef_path_continuous([5., 1., .5])) [[ 0. 0. 
0.46915237] [ 0.2159048 0.4425765 0.23668876]] See also -------- lars_path Lasso LassoLars LassoCV LassoLarsCV sklearn.decomposition.sparse_encode """ return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas, alphas=alphas, precompute=precompute, Xy=Xy, copy_X=copy_X, coef_init=coef_init, verbose=verbose, positive=positive, **params) def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, **params): """Compute elastic net path with coordinate descent The elastic net optimization function varies for mono and multi-outputs. For mono-output tasks it is:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 For multi-output tasks it is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <elastic_net>`. Parameters ---------- X : {array-like}, shape (n_samples, n_features) Training data. Pass directly as Fortran-contiguous data to avoid unnecessary memory duplication. If ``y`` is mono-output then ``X`` can be sparse. y : ndarray, shape (n_samples,) or (n_samples, n_outputs) Target values l1_ratio : float, optional float between 0 and 1 passed to elastic net (scaling between l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso eps : float Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3`` n_alphas : int, optional Number of alphas along the regularization path alphas : ndarray, optional List of alphas where to compute the models. If None alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. Xy : array-like, optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. coef_init : array, shape (n_features, ) | None The initial values of the coefficients. verbose : bool or integer Amount of verbosity. params : kwargs keyword arguments passed to the coordinate descent solver. return_n_iter : bool whether to return the number of iterations or not. positive : bool, default False If set to True, forces coefficients to be positive. Returns ------- alphas : array, shape (n_alphas,) The alphas along the path where models are computed. coefs : array, shape (n_features, n_alphas) or \ (n_outputs, n_features, n_alphas) Coefficients along the path. dual_gaps : array, shape (n_alphas,) The dual gaps at the end of the optimization for each alpha. n_iters : array-like, shape (n_alphas,) The number of iterations taken by the coordinate descent optimizer to reach the specified tolerance for each alpha. (Is returned when ``return_n_iter`` is set to True). Notes ----- See examples/plot_lasso_coordinate_descent_path.py for an example. 
See also -------- MultiTaskElasticNet MultiTaskElasticNetCV ElasticNet ElasticNetCV """ X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X) if Xy is not None: Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False, ensure_2d=False) n_samples, n_features = X.shape multi_output = False if y.ndim != 1: multi_output = True _, n_outputs = y.shape # MultiTaskElasticNet does not support sparse matrices if not multi_output and sparse.isspmatrix(X): if 'X_mean' in params: # As sparse matrices are not actually centered we need this # to be passed to the CD solver. X_sparse_scaling = params['X_mean'] / params['X_std'] else: X_sparse_scaling = np.zeros(n_features) # X should be normalized and fit already. X, y, X_mean, y_mean, X_std, precompute, Xy = \ _pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False, copy=False) if alphas is None: # No need to normalize of fit_intercept: it has been done # above alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio, fit_intercept=False, eps=eps, n_alphas=n_alphas, normalize=False, copy_X=False) else: alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered n_alphas = len(alphas) tol = params.get('tol', 1e-4) max_iter = params.get('max_iter', 1000) dual_gaps = np.empty(n_alphas) n_iters = [] rng = check_random_state(params.get('random_state', None)) selection = params.get('selection', 'cyclic') if selection not in ['random', 'cyclic']: raise ValueError("selection should be either random or cyclic.") random = (selection == 'random') if not multi_output: coefs = np.empty((n_features, n_alphas), dtype=np.float64) else: coefs = np.empty((n_outputs, n_features, n_alphas), dtype=np.float64) if coef_init is None: coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1])) else: coef_ = np.asfortranarray(coef_init) for i, alpha in enumerate(alphas): l1_reg = alpha * l1_ratio * n_samples l2_reg = alpha * (1.0 - l1_ratio) * n_samples if not multi_output and sparse.isspmatrix(X): model = cd_fast.sparse_enet_coordinate_descent( coef_, l1_reg, l2_reg, X.data, X.indices, X.indptr, y, X_sparse_scaling, max_iter, tol, rng, random, positive) elif multi_output: model = cd_fast.enet_coordinate_descent_multi_task( coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random) elif isinstance(precompute, np.ndarray): precompute = check_array(precompute, 'csc', dtype=np.float64, order='F') model = cd_fast.enet_coordinate_descent_gram( coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter, tol, rng, random, positive) elif precompute is False: model = cd_fast.enet_coordinate_descent( coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random, positive) else: raise ValueError("Precompute should be one of True, False, " "'auto' or array-like") coef_, dual_gap_, eps_, n_iter_ = model coefs[..., i] = coef_ dual_gaps[i] = dual_gap_ n_iters.append(n_iter_) if dual_gap_ > eps_: warnings.warn('Objective did not converge.' + ' You might want' + ' to increase the number of iterations', ConvergenceWarning) if verbose: if verbose > 2: print(model) elif verbose > 1: print('Path: %03i out of %03i' % (i, n_alphas)) else: sys.stderr.write('.') if return_n_iter: return alphas, coefs, dual_gaps, n_iters return alphas, coefs, dual_gaps ############################################################################### # ElasticNet model class ElasticNet(LinearModel, RegressorMixin): """Linear regression with combined L1 and L2 priors as regularizer. 
Minimizes the objective function:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 If you are interested in controlling the L1 and L2 penalty separately, keep in mind that this is equivalent to:: a * L1 + b * L2 where:: alpha = a + b and l1_ratio = a / (a + b) The parameter l1_ratio corresponds to alpha in the glmnet R package while alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable, unless you supply your own sequence of alpha. Read more in the :ref:`User Guide <elastic_net>`. Parameters ---------- alpha : float Constant that multiplies the penalty terms. Defaults to 1.0 See the notes for the exact mathematical meaning of this parameter. ``alpha = 0`` is equivalent to an ordinary least square, solved by the :class:`LinearRegression` object. For numerical reasons, using ``alpha = 0`` with the Lasso object is not advised and you should prefer the LinearRegression object. l1_ratio : float The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2. fit_intercept : bool Whether the intercept should be estimated or not. If ``False``, the data is assumed to be already centered. normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. For sparse input this option is always ``True`` to preserve sparsity. WARNING : The ``'auto'`` option is deprecated and will be removed in 0.18. max_iter : int, optional The maximum number of iterations copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \ (n_targets, n_features) ``sparse_coef_`` is a readonly property derived from ``coef_`` intercept_ : float | array, shape (n_targets,) independent term in decision function. n_iter_ : array-like, shape (n_targets,) number of iterations run by the coordinate descent solver to reach the specified tolerance. 
Notes ----- To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. See also -------- SGDRegressor: implements elastic net regression with incremental training. SGDClassifier: implements logistic regression with elastic net penalty (``SGDClassifier(loss="log", penalty="elasticnet")``). """ path = staticmethod(enet_path) def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, precompute=False, max_iter=1000, copy_X=True, tol=1e-4, warm_start=False, positive=False, random_state=None, selection='cyclic'): self.alpha = alpha self.l1_ratio = l1_ratio self.coef_ = None self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.positive = positive self.intercept_ = 0.0 self.random_state = random_state self.selection = selection def fit(self, X, y): """Fit model with coordinate descent. Parameters ----------- X : ndarray or scipy.sparse matrix, (n_samples, n_features) Data y : ndarray, shape (n_samples,) or (n_samples, n_targets) Target Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """ if self.alpha == 0: warnings.warn("With alpha=0, this algorithm does not converge " "well. You are advised to use the LinearRegression " "estimator", stacklevel=2) if self.precompute == 'auto': warnings.warn("Setting precompute to 'auto', was found to be " "slower even when n_samples > n_features. 
Hence " "it will be removed in 0.18.", DeprecationWarning, stacklevel=2) X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64, order='F', copy=self.copy_X and self.fit_intercept, multi_output=True, y_numeric=True) X, y, X_mean, y_mean, X_std, precompute, Xy = \ _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=True) if y.ndim == 1: y = y[:, np.newaxis] if Xy is not None and Xy.ndim == 1: Xy = Xy[:, np.newaxis] n_samples, n_features = X.shape n_targets = y.shape[1] if self.selection not in ['cyclic', 'random']: raise ValueError("selection should be either random or cyclic.") if not self.warm_start or self.coef_ is None: coef_ = np.zeros((n_targets, n_features), dtype=np.float64, order='F') else: coef_ = self.coef_ if coef_.ndim == 1: coef_ = coef_[np.newaxis, :] dual_gaps_ = np.zeros(n_targets, dtype=np.float64) self.n_iter_ = [] for k in xrange(n_targets): if Xy is not None: this_Xy = Xy[:, k] else: this_Xy = None _, this_coef, this_dual_gap, this_iter = \ self.path(X, y[:, k], l1_ratio=self.l1_ratio, eps=None, n_alphas=None, alphas=[self.alpha], precompute=precompute, Xy=this_Xy, fit_intercept=False, normalize=False, copy_X=True, verbose=False, tol=self.tol, positive=self.positive, X_mean=X_mean, X_std=X_std, return_n_iter=True, coef_init=coef_[k], max_iter=self.max_iter, random_state=self.random_state, selection=self.selection) coef_[k] = this_coef[:, 0] dual_gaps_[k] = this_dual_gap[0] self.n_iter_.append(this_iter[0]) if n_targets == 1: self.n_iter_ = self.n_iter_[0] self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_]) self._set_intercept(X_mean, y_mean, X_std) # return self for chaining fit and predict calls return self @property def sparse_coef_(self): """ sparse representation of the fitted coef """ return sparse.csr_matrix(self.coef_) @deprecated(" and will be removed in 0.19") def decision_function(self, X): """Decision function of the linear model Parameters ---------- X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) Returns ------- T : array, shape (n_samples,) The predicted decision function """ return self._decision_function(X) def _decision_function(self, X): """Decision function of the linear model Parameters ---------- X : numpy array or scipy.sparse matrix of shape (n_samples, n_features) Returns ------- T : array, shape (n_samples,) The predicted decision function """ check_is_fitted(self, 'n_iter_') if sparse.isspmatrix(X): return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True) + self.intercept_) else: return super(ElasticNet, self)._decision_function(X) ############################################################################### # Lasso model class Lasso(ElasticNet): """Linear Model trained with L1 prior as regularizer (aka the Lasso) The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Technically the Lasso model is optimizing the same objective function as the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty). Read more in the :ref:`User Guide <lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1 term. Defaults to 1.0. ``alpha = 0`` is equivalent to an ordinary least square, solved by the :class:`LinearRegression` object. For numerical reasons, using ``alpha = 0`` is with the Lasso object is not advised and you should prefer the LinearRegression object. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. 
data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. For sparse input this option is always ``True`` to preserve sparsity. WARNING : The ``'auto'`` option is deprecated and will be removed in 0.18. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \ (n_targets, n_features) ``sparse_coef_`` is a readonly property derived from ``coef_`` intercept_ : float | array, shape (n_targets,) independent term in decision function. n_iter_ : int | array-like, shape (n_targets,) number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.Lasso(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000, normalize=False, positive=False, precompute=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [ 0.85 0. ] >>> print(clf.intercept_) 0.15 See also -------- lars_path lasso_path LassoLars LassoCV LassoLarsCV sklearn.decomposition.sparse_encode Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. 
""" path = staticmethod(enet_path) def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, precompute=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, positive=False, random_state=None, selection='cyclic'): super(Lasso, self).__init__( alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept, normalize=normalize, precompute=precompute, copy_X=copy_X, max_iter=max_iter, tol=tol, warm_start=warm_start, positive=positive, random_state=random_state, selection=selection) ############################################################################### # Functions for CV with paths functions def _path_residuals(X, y, train, test, path, path_params, alphas=None, l1_ratio=1, X_order=None, dtype=None): """Returns the MSE for the models computed by 'path' Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values train : list of indices The indices of the train set test : list of indices The indices of the test set path : callable function returning a list of models on the path. See enet_path for an example of signature path_params : dictionary Parameters passed to the path function alphas : array-like, optional Array of float that is used for cross-validation. If not provided, computed using 'path' l1_ratio : float, optional float between 0 and 1 passed to ElasticNet (scaling between l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2 X_order : {'F', 'C', or None}, optional The order of the arrays expected by the path function to avoid memory copies dtype : a numpy dtype or None The dtype of the arrays expected by the path function to avoid memory copies """ X_train = X[train] y_train = y[train] X_test = X[test] y_test = y[test] fit_intercept = path_params['fit_intercept'] normalize = path_params['normalize'] if y.ndim == 1: precompute = path_params['precompute'] else: # No Gram variant of multi-task exists right now. # Fall back to default enet_multitask precompute = False X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \ _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept, copy=False) path_params = path_params.copy() path_params['Xy'] = Xy path_params['X_mean'] = X_mean path_params['X_std'] = X_std path_params['precompute'] = precompute path_params['copy_X'] = False path_params['alphas'] = alphas if 'l1_ratio' in path_params: path_params['l1_ratio'] = l1_ratio # Do the ordering and type casting here, as if it is done in the path, # X is copied and a reference is kept here X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order) alphas, coefs, _ = path(X_train, y_train, **path_params) del X_train, y_train if y.ndim == 1: # Doing this so that it becomes coherent with multioutput. coefs = coefs[np.newaxis, :, :] y_mean = np.atleast_1d(y_mean) y_test = y_test[:, np.newaxis] if normalize: nonzeros = np.flatnonzero(X_std) coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis] intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs) if sparse.issparse(X_test): n_order, n_features, n_alphas = coefs.shape # Work around for sparse matices since coefs is a 3-D numpy array. 
coefs_feature_major = np.rollaxis(coefs, 1) feature_2d = np.reshape(coefs_feature_major, (n_features, -1)) X_test_coefs = safe_sparse_dot(X_test, feature_2d) X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1) else: X_test_coefs = safe_sparse_dot(X_test, coefs) residues = X_test_coefs - y_test[:, :, np.newaxis] residues += intercepts this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0) return this_mses class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)): """Base class for iterative model fitting along a regularization path""" @abstractmethod def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, positive=False, random_state=None, selection='cyclic'): self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.tol = tol self.copy_X = copy_X self.cv = cv self.verbose = verbose self.n_jobs = n_jobs self.positive = positive self.random_state = random_state self.selection = selection def fit(self, X, y): """Fit linear model with coordinate descent Fit is on grid of alphas and best alpha estimated by cross-validation. Parameters ---------- X : {array-like}, shape (n_samples, n_features) Training data. Pass directly as float64, Fortran-contiguous data to avoid unnecessary memory duplication. If y is mono-output, X can be sparse. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values """ y = np.asarray(y, dtype=np.float64) if y.shape[0] == 0: raise ValueError("y has 0 samples: %r" % y) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV): if model_str == 'ElasticNet': model = ElasticNet() else: model = Lasso() if y.ndim > 1: raise ValueError("For multi-task outputs, use " "MultiTask%sCV" % (model_str)) else: if sparse.isspmatrix(X): raise TypeError("X should be dense but a sparse matrix was" "passed") elif y.ndim == 1: raise ValueError("For mono-task outputs, use " "%sCV" % (model_str)) if model_str == 'ElasticNet': model = MultiTaskElasticNet() else: model = MultiTaskLasso() if self.selection not in ["random", "cyclic"]: raise ValueError("selection should be either random or cyclic.") # This makes sure that there is no duplication in memory. 
# Dealing right with copy_X is important in the following: # Multiple functions touch X and subsamples of X and can induce a # lot of duplication of memory copy_X = self.copy_X and self.fit_intercept if isinstance(X, np.ndarray) or sparse.isspmatrix(X): # Keep a reference to X reference_to_old_X = X # Let us not impose fortran ordering or float64 so far: it is # not useful for the cross-validation loop and will be done # by the model fitting itself X = check_array(X, 'csc', copy=False) if sparse.isspmatrix(X): if not np.may_share_memory(reference_to_old_X.data, X.data): # X is a sparse matrix and has been copied copy_X = False elif not np.may_share_memory(reference_to_old_X, X): # X has been copied copy_X = False del reference_to_old_X else: X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X) copy_X = False if X.shape[0] != y.shape[0]: raise ValueError("X and y have inconsistent dimensions (%d != %d)" % (X.shape[0], y.shape[0])) # All LinearModelCV parameters except 'cv' are acceptable path_params = self.get_params() if 'l1_ratio' in path_params: l1_ratios = np.atleast_1d(path_params['l1_ratio']) # For the first path, we need to set l1_ratio path_params['l1_ratio'] = l1_ratios[0] else: l1_ratios = [1, ] path_params.pop('cv', None) path_params.pop('n_jobs', None) alphas = self.alphas n_l1_ratio = len(l1_ratios) if alphas is None: alphas = [] for l1_ratio in l1_ratios: alphas.append(_alpha_grid( X, y, l1_ratio=l1_ratio, fit_intercept=self.fit_intercept, eps=self.eps, n_alphas=self.n_alphas, normalize=self.normalize, copy_X=self.copy_X)) else: # Making sure alphas is properly ordered. alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1)) # We want n_alphas to be the number of alphas used for each l1_ratio. n_alphas = len(alphas[0]) path_params.update({'n_alphas': n_alphas}) path_params['copy_X'] = copy_X # We are not computing in parallel, we can modify X # inplace in the folds if not (self.n_jobs == 1 or self.n_jobs is None): path_params['copy_X'] = False # init cross-validation generator cv = check_cv(self.cv, X) # Compute path for all folds and compute MSE to get the best alpha folds = list(cv) best_mse = np.inf # We do a double for loop folded in one, in order to be able to # iterate in parallel on l1_ratio and folds jobs = (delayed(_path_residuals)(X, y, train, test, self.path, path_params, alphas=this_alphas, l1_ratio=this_l1_ratio, X_order='F', dtype=np.float64) for this_l1_ratio, this_alphas in zip(l1_ratios, alphas) for train, test in folds) mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(jobs) mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1)) mean_mse = np.mean(mse_paths, axis=1) self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1)) for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse): i_best_alpha = np.argmin(mse_alphas) this_best_mse = mse_alphas[i_best_alpha] if this_best_mse < best_mse: best_alpha = l1_alphas[i_best_alpha] best_l1_ratio = l1_ratio best_mse = this_best_mse self.l1_ratio_ = best_l1_ratio self.alpha_ = best_alpha if self.alphas is None: self.alphas_ = np.asarray(alphas) if n_l1_ratio == 1: self.alphas_ = self.alphas_[0] # Remove duplicate alphas in case alphas is provided. 
else: self.alphas_ = np.asarray(alphas[0]) # Refit the model with the parameters selected common_params = dict((name, value) for name, value in self.get_params().items() if name in model.get_params()) model.set_params(**common_params) model.alpha = best_alpha model.l1_ratio = best_l1_ratio model.copy_X = copy_X model.precompute = False model.fit(X, y) if not hasattr(self, 'l1_ratio'): del self.l1_ratio_ self.coef_ = model.coef_ self.intercept_ = model.intercept_ self.dual_gap_ = model.dual_gap_ self.n_iter_ = model.n_iter_ return self class LassoCV(LinearModelCV, RegressorMixin): """Lasso linear model with iterative fitting along a regularization path The best model is selected by cross-validation. The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. n_alphas : int, optional Number of alphas along the regularization path alphas : numpy array, optional List of alphas where to compute the models. If ``None`` alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see the :mod:`sklearn.cross_validation` module for the list of possible objects. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. positive : bool, optional If positive, restrict regression coefficients to be positive selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. fit_intercept : boolean, default True whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Attributes ---------- alpha_ : float The amount of penalization chosen by cross validation coef_ : array, shape (n_features,) | (n_targets, n_features) parameter vector (w in the cost function formula) intercept_ : float | array, shape (n_targets,) independent term in decision function. 
mse_path_ : array, shape (n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) The grid of alphas used for fitting dual_gap_ : ndarray, shape () The dual gap at the end of the optimization for the optimal alpha (``alpha_``). n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Notes ----- See examples/linear_model/lasso_path_with_crossvalidation.py for an example. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. See also -------- lars_path lasso_path LassoLars Lasso LassoLarsCV """ path = staticmethod(lasso_path) def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, positive=False, random_state=None, selection='cyclic'): super(LassoCV, self).__init__( eps=eps, n_alphas=n_alphas, alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive, random_state=random_state, selection=selection) class ElasticNetCV(LinearModelCV, RegressorMixin): """Elastic Net model with iterative fitting along a regularization path The best model is selected by cross-validation. Read more in the :ref:`User Guide <elastic_net>`. Parameters ---------- l1_ratio : float, optional float between 0 and 1 passed to ElasticNet (scaling between l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2 This parameter can be a list, in which case the different values are tested by cross-validation and the one giving the best prediction score is used. Note that a good choice of list of values for l1_ratio is often to put more values close to 1 (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7, .9, .95, .99, 1]`` eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. n_alphas : int, optional Number of alphas along the regularization path, used for each l1_ratio. alphas : numpy array, optional List of alphas where to compute the models. If None alphas are set automatically precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see the :mod:`sklearn.cross_validation` module for the list of possible objects. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. positive : bool, optional When set to ``True``, forces the coefficients to be positive. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. 
This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Attributes ---------- alpha_ : float The amount of penalization chosen by cross validation l1_ratio_ : float The compromise between l1 and l2 penalization chosen by cross validation coef_ : array, shape (n_features,) | (n_targets, n_features) Parameter vector (w in the cost function formula), intercept_ : float | array, shape (n_targets, n_features) Independent term in the decision function. mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds) Mean square error for the test set on each fold, varying l1_ratio and alpha. alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas) The grid of alphas used for fitting, for each l1_ratio. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Notes ----- See examples/linear_model/lasso_path_with_crossvalidation.py for an example. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. The parameter l1_ratio corresponds to alpha in the glmnet R package while alpha corresponds to the lambda parameter in glmnet. More specifically, the optimization objective is:: 1 / (2 * n_samples) * ||y - Xw||^2_2 + + alpha * l1_ratio * ||w||_1 + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2 If you are interested in controlling the L1 and L2 penalty separately, keep in mind that this is equivalent to:: a * L1 + b * L2 for:: alpha = a + b and l1_ratio = a / (a + b). See also -------- enet_path ElasticNet """ path = staticmethod(enet_path) def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=1e-4, cv=None, copy_X=True, verbose=0, n_jobs=1, positive=False, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute self.max_iter = max_iter self.tol = tol self.cv = cv self.copy_X = copy_X self.verbose = verbose self.n_jobs = n_jobs self.positive = positive self.random_state = random_state self.selection = selection ############################################################################### # Multi Task ElasticNet and Lasso models (with joint feature selection) class MultiTaskElasticNet(Lasso): """Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer The optimization objective for MultiTaskElasticNet is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1/L2 term. 
Defaults to 1.0 l1_ratio : float The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). If a 1D y is \ passed in at fit (non multi-task usage), ``coef_`` is then a 1D array n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) ... #doctest: +NORMALIZE_WHITESPACE MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True, l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [[ 0.45663524 0.45612256] [ 0.45663524 0.45612256]] >>> print(clf.intercept_) [ 0.0872422 0.0872422] See also -------- ElasticNet, MultiTaskLasso Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.alpha = alpha self.coef_ = None self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.random_state = random_state self.selection = selection def fit(self, X, y): """Fit MultiTaskLasso model with coordinate descent Parameters ----------- X : ndarray, shape (n_samples, n_features) Data y : ndarray, shape (n_samples, n_tasks) Target Notes ----- Coordinate descent is an algorithm that considers each column of data at a time hence it will automatically convert the X input as a Fortran-contiguous numpy array if necessary. 
To avoid memory re-allocation it is advised to allocate the initial data in memory directly using that format. """ # X and y must be of type float64 X = check_array(X, dtype=np.float64, order='F', copy=self.copy_X and self.fit_intercept) y = np.asarray(y, dtype=np.float64) if hasattr(self, 'l1_ratio'): model_str = 'ElasticNet' else: model_str = 'Lasso' if y.ndim == 1: raise ValueError("For mono-task outputs, use %s" % model_str) n_samples, n_features = X.shape _, n_tasks = y.shape if n_samples != y.shape[0]: raise ValueError("X and y have inconsistent dimensions (%d != %d)" % (n_samples, y.shape[0])) X, y, X_mean, y_mean, X_std = center_data( X, y, self.fit_intercept, self.normalize, copy=False) if not self.warm_start or self.coef_ is None: self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64, order='F') l1_reg = self.alpha * self.l1_ratio * n_samples l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory if self.selection not in ['random', 'cyclic']: raise ValueError("selection should be either random or cyclic.") random = (self.selection == 'random') self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \ cd_fast.enet_coordinate_descent_multi_task( self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol, check_random_state(self.random_state), random) self._set_intercept(X_mean, y_mean, X_std) if self.dual_gap_ > self.eps_: warnings.warn('Objective did not converge, you might want' ' to increase the number of iterations') # return self for chaining fit and predict calls return self class MultiTaskLasso(MultiTaskElasticNet): """Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of earch row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- alpha : float, optional Constant that multiplies the L1/L2 term. Defaults to 1.0 fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. warm_start : bool, optional When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4 random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- coef_ : array, shape (n_tasks, n_features) parameter vector (W in the cost function formula) intercept_ : array, shape (n_tasks,) independent term in decision function. 
n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskLasso(alpha=0.1) >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]]) MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000, normalize=False, random_state=None, selection='cyclic', tol=0.0001, warm_start=False) >>> print(clf.coef_) [[ 0.89393398 0. ] [ 0.89393398 0. ]] >>> print(clf.intercept_) [ 0.10606602 0.10606602] See also -------- Lasso, MultiTaskElasticNet Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ def __init__(self, alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=1e-4, warm_start=False, random_state=None, selection='cyclic'): self.alpha = alpha self.coef_ = None self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.copy_X = copy_X self.tol = tol self.warm_start = warm_start self.l1_ratio = 1.0 self.random_state = random_state self.selection = selection class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin): """Multi-task L1/L2 ElasticNet with built-in cross-validation. The optimization objective for MultiTaskElasticNet is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. alphas : array-like, optional List of alphas where to compute the models. If not provided, set automatically. n_alphas : int, optional Number of alphas along the regularization path l1_ratio : float or array of floats The ElasticNet mixing parameter, with 0 < l1_ratio <= 1. For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see the :mod:`sklearn.cross_validation` module for the list of possible objects. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. Note that this is used only if multiple values for l1_ratio are given. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. 
This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). alpha_ : float The amount of penalization chosen by cross validation mse_path_ : array, shape (n_alphas, n_folds) or \ (n_l1_ratio, n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas) The grid of alphas used for fitting, for each l1_ratio l1_ratio_ : float best l1_ratio obtained by cross-validation. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.MultiTaskElasticNetCV() >>> clf.fit([[0,0], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [2, 2]]) ... #doctest: +NORMALIZE_WHITESPACE MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001, fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100, n_jobs=1, normalize=False, random_state=None, selection='cyclic', tol=0.0001, verbose=0) >>> print(clf.coef_) [[ 0.52875032 0.46958558] [ 0.52875032 0.46958558]] >>> print(clf.intercept_) [ 0.00166409 0.00166409] See also -------- MultiTaskElasticNet ElasticNetCV MultiTaskLassoCV Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ path = staticmethod(enet_path) def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-4, cv=None, copy_X=True, verbose=0, n_jobs=1, random_state=None, selection='cyclic'): self.l1_ratio = l1_ratio self.eps = eps self.n_alphas = n_alphas self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.tol = tol self.cv = cv self.copy_X = copy_X self.verbose = verbose self.n_jobs = n_jobs self.random_state = random_state self.selection = selection class MultiTaskLassoCV(LinearModelCV, RegressorMixin): """Multi-task L1/L2 Lasso with built-in cross-validation. The optimization objective for MultiTaskLasso is:: (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21 Where:: ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} i.e. the sum of norm of each row. Read more in the :ref:`User Guide <multi_task_lasso>`. Parameters ---------- eps : float, optional Length of the path. ``eps=1e-3`` means that ``alpha_min / alpha_max = 1e-3``. alphas : array-like, optional List of alphas where to compute the models. If not provided, set automaticlly. n_alphas : int, optional Number of alphas along the regularization path fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If ``True``, the regressors X will be normalized before regression. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. max_iter : int, optional The maximum number of iterations. 
tol : float, optional The tolerance for the optimization: if the updates are smaller than ``tol``, the optimization code checks the dual gap for optimality and continues until it is smaller than ``tol``. cv : integer or cross-validation generator, optional If an integer is passed, it is the number of fold (default 3). Specific cross-validation objects can be passed, see the :mod:`sklearn.cross_validation` module for the list of possible objects. verbose : bool or integer Amount of verbosity. n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs. Note that this is used only if multiple values for l1_ratio are given. selection : str, default 'cyclic' If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to 'random') often leads to significantly faster convergence especially when tol is higher than 1e-4. random_state : int, RandomState instance, or None (default) The seed of the pseudo random number generator that selects a random feature to update. Useful only when selection is set to 'random'. Attributes ---------- intercept_ : array, shape (n_tasks,) Independent term in decision function. coef_ : array, shape (n_tasks, n_features) Parameter vector (W in the cost function formula). alpha_ : float The amount of penalization chosen by cross validation mse_path_ : array, shape (n_alphas, n_folds) mean square error for the test set on each fold, varying alpha alphas_ : numpy array, shape (n_alphas,) The grid of alphas used for fitting. n_iter_ : int number of iterations run by the coordinate descent solver to reach the specified tolerance for the optimal alpha. See also -------- MultiTaskElasticNet ElasticNetCV MultiTaskElasticNetCV Notes ----- The algorithm used to fit the model is coordinate descent. To avoid unnecessary memory duplication the X argument of the fit method should be directly passed as a Fortran-contiguous numpy array. """ path = staticmethod(lasso_path) def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=1e-4, copy_X=True, cv=None, verbose=False, n_jobs=1, random_state=None, selection='cyclic'): super(MultiTaskLassoCV, self).__init__( eps=eps, n_alphas=n_alphas, alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, max_iter=max_iter, tol=tol, copy_X=copy_X, cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state, selection=selection)
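# Illustrative usage sketch (synthetic data; the feature weights, fold count
# and alpha grid below are arbitrary choices, and the guard keeps the demo
# from running on import).  LassoCV / ElasticNetCV fit a path of alphas on
# each cross-validation fold, keep the (l1_ratio, alpha) pair with the lowest
# mean squared error, and refit that model on the full data.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(100, 20)
    w = np.zeros(20)
    w[:3] = [1.5, -2.0, 3.0]          # only three informative features
    y = np.dot(X, w) + 0.1 * rng.randn(100)

    lasso_cv = LassoCV(n_alphas=50, cv=3).fit(X, y)
    print('LassoCV: alpha_=%.4f, nonzero coefs=%d'
          % (lasso_cv.alpha_, np.sum(lasso_cv.coef_ != 0)))

    enet_cv = ElasticNetCV(l1_ratio=[.1, .5, .9], cv=3).fit(X, y)
    print('ElasticNetCV: l1_ratio_=%.1f, alpha_=%.4f'
          % (enet_cv.l1_ratio_, enet_cv.alpha_))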
bsd-3-clause
misdoro/python-ase
ase/neb.py
2
15925
# -*- coding: utf-8 -*- import threading from math import sqrt import numpy as np import ase.parallel as mpi from ase.calculators.calculator import Calculator from ase.calculators.singlepoint import SinglePointCalculator from ase.io import read from ase.optimize import BFGS from ase.utils.geometry import find_mic class NEB: def __init__(self, images, k=0.1, climb=False, parallel=False, world=None): """Nudged elastic band. images: list of Atoms objects Images defining path from initial to final state. k: float or list of floats Spring constant(s) in eV/Ang. One number or one for each spring. climb: bool Use a climbing image (default is no climbing image). parallel: bool Distribute images over processors. """ self.images = images self.climb = climb self.parallel = parallel self.natoms = len(images[0]) self.nimages = len(images) self.emax = np.nan if isinstance(k, (float, int)): k = [k] * (self.nimages - 1) self.k = list(k) if world is None: world = mpi.world self.world = world if parallel: assert world.size == 1 or world.size % (self.nimages - 2) == 0 def interpolate(self, method='linear'): interpolate(self.images) if method == 'idpp': self.idpp_interpolate(traj=None, log=None) def idpp_interpolate(self, traj='idpp.traj', log='idpp.log', fmax=0.1, optimizer=BFGS): d1 = self.images[0].get_all_distances() d2 = self.images[-1].get_all_distances() d = (d2 - d1) / (self.nimages - 1) old = [] for i, image in enumerate(self.images): old.append(image.calc) image.calc = IDPP(d1 + i * d) opt = BFGS(self, trajectory=traj, logfile=log) opt.run(fmax=0.1) for image, calc in zip(self.images, old): image.calc = calc def get_positions(self): positions = np.empty(((self.nimages - 2) * self.natoms, 3)) n1 = 0 for image in self.images[1:-1]: n2 = n1 + self.natoms positions[n1:n2] = image.get_positions() n1 = n2 return positions def set_positions(self, positions): n1 = 0 for image in self.images[1:-1]: n2 = n1 + self.natoms image.set_positions(positions[n1:n2]) n1 = n2 # Parallel NEB with Jacapo needs this: try: image.get_calculator().set_atoms(image) except AttributeError: pass def get_forces(self): """Evaluate and return the forces.""" images = self.images forces = np.empty(((self.nimages - 2), self.natoms, 3)) energies = np.empty(self.nimages - 2) if not self.parallel: # Do all images - one at a time: for i in range(1, self.nimages - 1): energies[i - 1] = images[i].get_potential_energy() forces[i - 1] = images[i].get_forces() elif self.world.size == 1: def run(image, energies, forces): energies[:] = image.get_potential_energy() forces[:] = image.get_forces() threads = [threading.Thread(target=run, args=(images[i], energies[i - 1:i], forces[i - 1:i])) for i in range(1, self.nimages - 1)] for thread in threads: thread.start() for thread in threads: thread.join() else: # Parallelize over images: i = self.world.rank * (self.nimages - 2) // self.world.size + 1 try: energies[i - 1] = images[i].get_potential_energy() forces[i - 1] = images[i].get_forces() except: # Make sure other images also fail: error = self.world.sum(1.0) raise else: error = self.world.sum(0.0) if error: raise RuntimeError('Parallel NEB failed!') for i in range(1, self.nimages - 1): root = (i - 1) * self.world.size // (self.nimages - 2) self.world.broadcast(energies[i - 1:i], root) self.world.broadcast(forces[i - 1], root) imax = 1 + np.argsort(energies)[-1] self.emax = energies[imax - 1] tangent1 = images[1].get_positions() - images[0].get_positions() for i in range(1, self.nimages - 1): tangent2 = (images[i + 1].get_positions() - 
images[i].get_positions()) if i < imax: tangent = tangent2 elif i > imax: tangent = tangent1 else: tangent = tangent1 + tangent2 tt = np.vdot(tangent, tangent) f = forces[i - 1] ft = np.vdot(f, tangent) if i == imax and self.climb: f -= 2 * ft / tt * tangent else: f -= ft / tt * tangent f -= np.vdot(tangent1 * self.k[i - 1] - tangent2 * self.k[i], tangent) / tt * tangent tangent1 = tangent2 return forces.reshape((-1, 3)) def get_potential_energy(self): return self.emax def __len__(self): return (self.nimages - 2) * self.natoms class IDPP(Calculator): """Image dependent pair potential. See: Improved initial guess for minimum energy path calculations. Søren Smidstrup, Andreas Pedersen, Kurt Stokbro and Hannes Jónsson Chem. Phys. 140, 214106 (2014) """ implemented_properties = ['energy', 'forces'] def __init__(self, target): Calculator.__init__(self) self.target = target def calculate(self, atoms, properties, system_changes): Calculator.calculate(self, atoms, properties, system_changes) P = atoms.positions D = np.array([P - p for p in P]) # all distance vectors d = (D**2).sum(2)**0.5 dd = d - self.target d.ravel()[::len(d) + 1] = 1 # avoid dividing by zero d4 = d**4 e = 0.5 * (dd**2 / d4).sum() f = -2 * ((dd * (1 - 2 * dd / d) / d**5)[..., np.newaxis] * D).sum(0) self.results = {'energy': e, 'forces': f} class SingleCalculatorNEB(NEB): def __init__(self, images, k=0.1, climb=False): if isinstance(images, str): # this is a filename traj = read(images, '0:') images = [] for atoms in traj: images.append(atoms) NEB.__init__(self, images, k, climb, False) self.calculators = [None] * self.nimages self.energies_ok = False def interpolate(self, initial=0, final=-1, mic=False): """Interpolate linearly between initial and final images.""" if final < 0: final = self.nimages + final n = final - initial pos1 = self.images[initial].get_positions() pos2 = self.images[final].get_positions() dist = (pos2 - pos1) if mic: cell = self.images[initial].get_cell() assert((cell == self.images[final].get_cell()).all()) pbc = self.images[initial].get_pbc() assert((pbc == self.images[final].get_pbc()).all()) dist, D_len = find_mic(dist, cell, pbc) dist /= n for i in range(1, n): self.images[initial + i].set_positions(pos1 + i * dist) def refine(self, steps=1, begin=0, end=-1, mic=False): """Refine the NEB trajectory.""" if end < 0: end = self.nimages + end j = begin n = end - begin for i in range(n): for k in range(steps): self.images.insert(j + 1, self.images[j].copy()) self.calculators.insert(j + 1, None) self.k[j:j + 1] = [self.k[j] * (steps + 1)] * (steps + 1) self.nimages = len(self.images) self.interpolate(j, j + steps + 1, mic=mic) j += steps + 1 def set_positions(self, positions): # new positions -> new forces if self.energies_ok: # restore calculators self.set_calculators(self.calculators[1:-1]) NEB.set_positions(self, positions) def get_calculators(self): """Return the original calculators.""" calculators = [] for i, image in enumerate(self.images): if self.calculators[i] is None: calculators.append(image.get_calculator()) else: calculators.append(self.calculators[i]) return calculators def set_calculators(self, calculators): """Set new calculators to the images.""" self.energies_ok = False if not isinstance(calculators, list): calculators = [calculators] * self.nimages n = len(calculators) if n == self.nimages: for i in range(self.nimages): self.images[i].set_calculator(calculators[i]) elif n == self.nimages - 2: for i in range(1, self.nimages - 1): self.images[i].set_calculator(calculators[i - 1]) else: 
raise RuntimeError( 'len(calculators)=%d does not fit to len(images)=%d' % (n, self.nimages)) def get_energies_and_forces(self, all=False): """Evaluate energies and forces and hide the calculators""" if self.energies_ok: return self.emax = -1.e32 def calculate_and_hide(i): image = self.images[i] calc = image.get_calculator() if self.calculators[i] is None: self.calculators[i] = calc if calc is not None: if not isinstance(calc, SinglePointCalculator): self.images[i].set_calculator( SinglePointCalculator( image, energy=image.get_potential_energy(), forces=image.get_forces())) self.emax = min(self.emax, image.get_potential_energy()) if all and self.calculators[0] is None: calculate_and_hide(0) # Do all images - one at a time: for i in range(1, self.nimages - 1): calculate_and_hide(i) if all and self.calculators[-1] is None: calculate_and_hide(-1) self.energies_ok = True def get_forces(self): self.get_energies_and_forces() return NEB.get_forces(self) def n(self): return self.nimages def write(self, filename): from ase.io.trajectory import Trajectory traj = Trajectory(filename, 'w', self) traj.write() traj.close() def __add__(self, other): for image in other: self.images.append(image) return self def fit0(E, F, R): """Constructs curve parameters from the NEB images.""" E = np.array(E) - E[0] n = len(E) Efit = np.empty((n - 1) * 20 + 1) Sfit = np.empty((n - 1) * 20 + 1) s = [0] for i in range(n - 1): s.append(s[-1] + sqrt(((R[i + 1] - R[i])**2).sum())) lines = [] dEds0 = None for i in range(n): if i == 0: d = R[1] - R[0] ds = 0.5 * s[1] elif i == n - 1: d = R[-1] - R[-2] ds = 0.5 * (s[-1] - s[-2]) else: d = R[i + 1] - R[i - 1] ds = 0.25 * (s[i + 1] - s[i - 1]) d = d / sqrt((d**2).sum()) dEds = -(F[i] * d).sum() x = np.linspace(s[i] - ds, s[i] + ds, 3) y = E[i] + dEds * (x - s[i]) lines.append((x, y)) if i > 0: s0 = s[i - 1] s1 = s[i] x = np.linspace(s0, s1, 20, endpoint=False) c = np.linalg.solve(np.array([(1, s0, s0**2, s0**3), (1, s1, s1**2, s1**3), (0, 1, 2 * s0, 3 * s0**2), (0, 1, 2 * s1, 3 * s1**2)]), np.array([E[i - 1], E[i], dEds0, dEds])) y = c[0] + x * (c[1] + x * (c[2] + x * c[3])) Sfit[(i - 1) * 20:i * 20] = x Efit[(i - 1) * 20:i * 20] = y dEds0 = dEds Sfit[-1] = s[-1] Efit[-1] = E[-1] return s, E, Sfit, Efit, lines class NEBtools: """Class to make many of the common tools for NEB analysis available to the user. Useful for scripting the output of many jobs. Initialize with list of images which make up a single band.""" def __init__(self, images): self._images = images def get_barrier(self, fit=True, raw=False): """Returns the barrier estimate from the NEB, along with the Delta E of the elementary reaction. If fit=True, the barrier is estimated based on the interpolated fit to the images; if fit=False, the barrier is taken as the maximum-energy image without interpolation. Set raw=True to get the raw energy of the transition state instead of the forward barrier.""" s, E, Sfit, Efit, lines = self.get_fit() dE = E[-1] - E[0] if fit: barrier = max(Efit) else: barrier = max(E) if raw: barrier += self._images[0].get_potential_energy() return barrier, dE def plot_band(self, ax=None): """Plots the NEB band on matplotlib axes object 'ax'. 
If ax=None returns a new figure object.""" if not ax: from matplotlib import pyplot fig = pyplot.figure() ax = fig.add_subplot(111) else: fig = None s, E, Sfit, Efit, lines = self.get_fit() ax.plot(s, E, 'o') for x, y in lines: ax.plot(x, y, '-g') ax.plot(Sfit, Efit, 'k-') ax.set_xlabel('path [$\AA$]') ax.set_ylabel('energy [eV]') Ef = max(Efit) - E[0] Er = max(Efit) - E[-1] dE = E[-1] - E[0] ax.set_title('$E_\mathrm{f} \\approx$ %.3f eV; ' '$E_\mathrm{r} \\approx$ %.3f eV; ' '$\\Delta E$ = %.3f eV' % (Ef, Er, dE)) return fig def get_fmax(self): """Returns fmax, as used by optimizers with NEB.""" neb = NEB(self._images) forces = neb.get_forces() return np.sqrt((forces**2).sum(axis=1).max()) def get_fit(self): """Returns the parameters for fitting images to band.""" images = self._images if not hasattr(images, 'repeat'): from ase.gui.images import Images images = Images(images) N = images.repeat.prod() natoms = images.natoms // N R = images.P[:, :natoms] E = images.E F = images.F[:, :natoms] s, E, Sfit, Efit, lines = fit0(E, F, R) return s, E, Sfit, Efit, lines def interpolate(images): """Given a list of images, linearly interpolate the positions of the interior images.""" pos1 = images[0].get_positions() pos2 = images[-1].get_positions() d = (pos2 - pos1) / (len(images) - 1.0) for i in range(1, len(images) - 1): images[i].set_positions(pos1 + i * d) # Parallel NEB with Jacapo needs this: try: images[i].get_calculator().set_atoms(images[i]) except AttributeError: pass
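# Illustrative usage sketch (a physically trivial single-atom toy path with
# the EMT calculator; the 2.5 Angstrom displacement, spring constant and
# fmax are arbitrary, and the guard keeps it from running on import).  It
# exercises the machinery above: build a band, interpolate the interior
# images, relax the band with an optimizer, then read off a barrier estimate.
if __name__ == '__main__':
    from ase import Atoms
    from ase.calculators.emt import EMT

    initial = Atoms('Cu', positions=[(0.0, 0.0, 0.0)])
    final = Atoms('Cu', positions=[(2.5, 0.0, 0.0)])

    images = [initial] + [initial.copy() for _ in range(3)] + [final]
    for image in images:
        image.set_calculator(EMT())

    band = NEB(images, k=0.1, climb=True)
    band.interpolate()                  # linear initial guess for the path
    BFGS(band, logfile=None).run(fmax=0.05)

    energies = [image.get_potential_energy() for image in images]
    print('barrier estimate: %.3f eV' % (max(energies) - energies[0]))
    # NEBtools(images).get_barrier() gives an interpolated estimate instead.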
gpl-2.0
harisbal/pandas
asv_bench/benchmarks/io/sql.py
5
5422
import sqlite3

import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, date_range, read_sql_query, read_sql_table
from sqlalchemy import create_engine


class SQL(object):

    params = ['sqlalchemy', 'sqlite']
    param_names = ['connection']

    def setup(self, connection):
        N = 10000
        con = {'sqlalchemy': create_engine('sqlite:///:memory:'),
               'sqlite': sqlite3.connect(':memory:')}
        self.table_name = 'test_type'
        self.query_all = 'SELECT * FROM {}'.format(self.table_name)
        self.con = con[connection]
        self.df = DataFrame({'float': np.random.randn(N),
                             'float_with_nan': np.random.randn(N),
                             'string': ['foo'] * N,
                             'bool': [True] * N,
                             'int': np.random.randint(0, N, size=N),
                             'datetime': date_range('2000-01-01',
                                                    periods=N,
                                                    freq='s')},
                            index=tm.makeStringIndex(N))
        self.df.loc[1000:3000, 'float_with_nan'] = np.nan
        self.df['datetime_string'] = self.df['datetime'].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists='replace')

    def time_to_sql_dataframe(self, connection):
        self.df.to_sql('test1', self.con, if_exists='replace')

    def time_read_sql_query(self, connection):
        read_sql_query(self.query_all, self.con)


class WriteSQLDtypes(object):

    params = (['sqlalchemy', 'sqlite'],
              ['float', 'float_with_nan', 'string', 'bool', 'int',
               'datetime'])
    param_names = ['connection', 'dtype']

    def setup(self, connection, dtype):
        N = 10000
        con = {'sqlalchemy': create_engine('sqlite:///:memory:'),
               'sqlite': sqlite3.connect(':memory:')}
        self.table_name = 'test_type'
        self.query_col = 'SELECT {} FROM {}'.format(dtype, self.table_name)
        self.con = con[connection]
        self.df = DataFrame({'float': np.random.randn(N),
                             'float_with_nan': np.random.randn(N),
                             'string': ['foo'] * N,
                             'bool': [True] * N,
                             'int': np.random.randint(0, N, size=N),
                             'datetime': date_range('2000-01-01',
                                                    periods=N,
                                                    freq='s')},
                            index=tm.makeStringIndex(N))
        self.df.loc[1000:3000, 'float_with_nan'] = np.nan
        self.df['datetime_string'] = self.df['datetime'].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists='replace')

    def time_to_sql_dataframe_column(self, connection, dtype):
        self.df[[dtype]].to_sql('test1', self.con, if_exists='replace')

    def time_read_sql_query_select_column(self, connection, dtype):
        read_sql_query(self.query_col, self.con)


class ReadSQLTable(object):

    def setup(self):
        N = 10000
        self.table_name = 'test'
        self.con = create_engine('sqlite:///:memory:')
        self.df = DataFrame({'float': np.random.randn(N),
                             'float_with_nan': np.random.randn(N),
                             'string': ['foo'] * N,
                             'bool': [True] * N,
                             'int': np.random.randint(0, N, size=N),
                             'datetime': date_range('2000-01-01',
                                                    periods=N,
                                                    freq='s')},
                            index=tm.makeStringIndex(N))
        self.df.loc[1000:3000, 'float_with_nan'] = np.nan
        self.df['datetime_string'] = self.df['datetime'].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists='replace')

    def time_read_sql_table_all(self):
        read_sql_table(self.table_name, self.con)

    def time_read_sql_table_parse_dates(self):
        read_sql_table(self.table_name, self.con, columns=['datetime_string'],
                       parse_dates=['datetime_string'])


class ReadSQLTableDtypes(object):

    params = ['float', 'float_with_nan', 'string', 'bool', 'int', 'datetime']
    param_names = ['dtype']

    def setup(self, dtype):
        N = 10000
        self.table_name = 'test'
        self.con = create_engine('sqlite:///:memory:')
        self.df = DataFrame({'float': np.random.randn(N),
                             'float_with_nan': np.random.randn(N),
                             'string': ['foo'] * N,
                             'bool': [True] * N,
                             'int': np.random.randint(0, N, size=N),
                             'datetime': date_range('2000-01-01',
                                                    periods=N,
                                                    freq='s')},
                            index=tm.makeStringIndex(N))
        self.df.loc[1000:3000, 'float_with_nan'] = np.nan
        self.df['datetime_string'] = self.df['datetime'].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists='replace')

    def time_read_sql_table_column(self, dtype):
        read_sql_table(self.table_name, self.con, columns=[dtype])


from ..pandas_vb_common import setup  # noqa: F401
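# Illustrative sketch (``run_sql_benchmarks_once`` is a hypothetical helper,
# not part of the asv suite, and it assumes SQLAlchemy is installed).  This
# is roughly what the asv harness does for one parameter combination: call
# ``setup`` and then invoke the timed methods.
def run_sql_benchmarks_once(connection='sqlalchemy'):
    bench = SQL()
    bench.setup(connection)
    bench.time_to_sql_dataframe(connection)   # DataFrame.to_sql round trip
    bench.time_read_sql_query(connection)     # SELECT * back into a DataFrame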
bsd-3-clause
wkfwkf/statsmodels
statsmodels/datasets/cancer/data.py
25
1732
"""Breast Cancer Data""" __docformat__ = 'restructuredtext' COPYRIGHT = """???""" TITLE = """Breast Cancer Data""" SOURCE = """ This is the breast cancer data used in Owen's empirical likelihood. It is taken from Rice, J.A. Mathematical Statistics and Data Analysis. http://www.cengage.com/statistics/discipline_content/dataLibrary.html """ DESCRSHORT = """Breast Cancer and county population""" DESCRLONG = """The number of breast cancer observances in various counties""" #suggested notes NOTE = """:: Number of observations: 301 Number of variables: 2 Variable name definitions: cancer - The number of breast cancer observances population - The population of the county """ import numpy as np from statsmodels.datasets import utils as du from os.path import dirname, abspath def load(): """ Load the data and return a Dataset class instance. Returns ------- Dataset instance: See DATASET_PROPOSAL.txt for more information. """ data = _get_data() ##### SET THE INDICES ##### #NOTE: None for exog_idx is the complement of endog_idx return du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float) def load_pandas(): data = _get_data() ##### SET THE INDICES ##### #NOTE: None for exog_idx is the complement of endog_idx return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=float) def _get_data(): filepath = dirname(abspath(__file__)) ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv ##### data = np.recfromtxt(open(filepath + '/cancer.csv', 'rb'), delimiter=",", names = True, dtype=float) return data
bsd-3-clause
BigDataforYou/movie_recommendation_workshop_1
big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/sparse/panel.py
1
18704
""" Data structures for sparse float data. Life is made simpler by dealing only with float64 data """ # pylint: disable=E1101,E1103,W0231 import warnings from pandas.compat import lrange, zip from pandas import compat import numpy as np from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.frame import DataFrame from pandas.core.panel import Panel from pandas.sparse.frame import SparseDataFrame from pandas.util.decorators import deprecate import pandas.core.common as com import pandas.core.ops as ops import pandas.lib as lib class SparsePanelAxis(object): def __init__(self, cache_field, frame_attr): self.cache_field = cache_field self.frame_attr = frame_attr def __get__(self, obj, type=None): return getattr(obj, self.cache_field, None) def __set__(self, obj, value): value = _ensure_index(value) if isinstance(value, MultiIndex): raise NotImplementedError("value cannot be a MultiIndex") for v in compat.itervalues(obj._frames): setattr(v, self.frame_attr, value) setattr(obj, self.cache_field, value) class SparsePanel(Panel): """ Sparse version of Panel Parameters ---------- frames : dict of DataFrame objects items : array-like major_axis : array-like minor_axis : array-like default_kind : {'block', 'integer'}, default 'block' Default sparse kind for converting Series to SparseSeries. Will not override SparseSeries passed into constructor default_fill_value : float Default fill_value for converting Series to SparseSeries. Will not override SparseSeries passed in Notes ----- """ ndim = 3 _typ = 'panel' _subtyp = 'sparse_panel' def __init__(self, frames=None, items=None, major_axis=None, minor_axis=None, default_fill_value=np.nan, default_kind='block', copy=False): # deprecation #11157 warnings.warn("SparsePanel is deprecated and will be removed in a " "future version", FutureWarning, stacklevel=2) if frames is None: frames = {} if isinstance(frames, np.ndarray): new_frames = {} for item, vals in zip(items, frames): new_frames[item] = SparseDataFrame( vals, index=major_axis, columns=minor_axis, default_fill_value=default_fill_value, default_kind=default_kind) frames = new_frames if not isinstance(frames, dict): raise TypeError('input must be a dict, a %r was passed' % type(frames).__name__) self.default_fill_value = fill_value = default_fill_value self.default_kind = kind = default_kind # pre-filter, if necessary if items is None: items = Index(sorted(frames.keys())) items = _ensure_index(items) (clean_frames, major_axis, minor_axis) = _convert_frames(frames, major_axis, minor_axis, kind=kind, fill_value=fill_value) self._frames = clean_frames # do we want to fill missing ones? 
for item in items: if item not in clean_frames: raise ValueError('column %r not found in data' % item) self._items = items self.major_axis = major_axis self.minor_axis = minor_axis def _consolidate_inplace(self): # pragma: no cover # do nothing when DataFrame calls this method pass def __array_wrap__(self, result): return SparsePanel(result, items=self.items, major_axis=self.major_axis, minor_axis=self.minor_axis, default_kind=self.default_kind, default_fill_value=self.default_fill_value) @classmethod def from_dict(cls, data): """ Analogous to Panel.from_dict """ return SparsePanel(data) def to_dense(self): """ Convert SparsePanel to (dense) Panel Returns ------- dense : Panel """ return Panel(self.values, self.items, self.major_axis, self.minor_axis) def as_matrix(self): return self.values @property def values(self): # return dense values return np.array([self._frames[item].values for item in self.items]) # need a special property for items to make the field assignable _items = None def _get_items(self): return self._items def _set_items(self, new_items): new_items = _ensure_index(new_items) if isinstance(new_items, MultiIndex): raise NotImplementedError("itemps cannot be a MultiIndex") # need to create new frames dict old_frame_dict = self._frames old_items = self._items self._frames = dict((new_k, old_frame_dict[old_k]) for new_k, old_k in zip(new_items, old_items)) self._items = new_items items = property(fget=_get_items, fset=_set_items) # DataFrame's index major_axis = SparsePanelAxis('_major_axis', 'index') # DataFrame's columns / "items" minor_axis = SparsePanelAxis('_minor_axis', 'columns') def _ixs(self, i, axis=0): """ for compat as we don't support Block Manager here i : int, slice, or sequence of integers axis : int """ key = self._get_axis(axis)[i] # xs cannot handle a non-scalar key, so just reindex here if com.is_list_like(key): return self.reindex(**{self._get_axis_name(axis): key}) return self.xs(key, axis=axis) def _slice(self, slobj, axis=0, kind=None): """ for compat as we don't support Block Manager here """ axis = self._get_axis_name(axis) index = self._get_axis(axis) return self.reindex(**{axis: index[slobj]}) def _get_item_cache(self, key): return self._frames[key] def __setitem__(self, key, value): if isinstance(value, DataFrame): value = value.reindex(index=self.major_axis, columns=self.minor_axis) if not isinstance(value, SparseDataFrame): value = value.to_sparse(fill_value=self.default_fill_value, kind=self.default_kind) else: raise ValueError('only DataFrame objects can be set currently') self._frames[key] = value if key not in self.items: self._items = Index(list(self.items) + [key]) def set_value(self, item, major, minor, value): """ Quickly set single value at (item, major, minor) location Parameters ---------- item : item label (panel item) major : major axis label (panel item row) minor : minor axis label (panel item column) value : scalar Notes ----- This method *always* returns a new object. 
It is not particularly efficient but is provided for API compatibility with Panel Returns ------- panel : SparsePanel """ dense = self.to_dense().set_value(item, major, minor, value) return dense.to_sparse(kind=self.default_kind, fill_value=self.default_fill_value) def __delitem__(self, key): loc = self.items.get_loc(key) indices = lrange(loc) + lrange(loc + 1, len(self.items)) del self._frames[key] self._items = self._items.take(indices) def __getstate__(self): # pickling from pandas.io.pickle import _pickle_array return (self._frames, _pickle_array(self.items), _pickle_array(self.major_axis), _pickle_array(self.minor_axis), self.default_fill_value, self.default_kind) def __setstate__(self, state): frames, items, major, minor, fv, kind = state from pandas.io.pickle import _unpickle_array self.default_fill_value = fv self.default_kind = kind self._items = _ensure_index(_unpickle_array(items)) self._major_axis = _ensure_index(_unpickle_array(major)) self._minor_axis = _ensure_index(_unpickle_array(minor)) self._frames = frames def copy(self, deep=True): """ Make a copy of the sparse panel Returns ------- copy : SparsePanel """ d = self._construct_axes_dict() if deep: new_data = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(self._frames)) d = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(d)) else: new_data = self._frames.copy() d['default_fill_value'] = self.default_fill_value d['default_kind'] = self.default_kind return SparsePanel(new_data, **d) def to_frame(self, filter_observations=True): """ Convert SparsePanel to (dense) DataFrame Returns ------- frame : DataFrame """ if not filter_observations: raise TypeError('filter_observations=False not supported for ' 'SparsePanel.to_long') I, N, K = self.shape counts = np.zeros(N * K, dtype=int) d_values = {} d_indexer = {} for item in self.items: frame = self[item] values, major, minor = _stack_sparse_info(frame) # values are stacked column-major indexer = minor * N + major counts.put(indexer, counts.take(indexer) + 1) # cuteness d_values[item] = values d_indexer[item] = indexer # have full set of observations for each item mask = counts == I # for each item, take mask values at index locations for those sparse # values, and use that to select values values = np.column_stack([d_values[item][mask.take(d_indexer[item])] for item in self.items]) inds, = mask.nonzero() # still column major major_labels = inds % N minor_labels = inds // N index = MultiIndex(levels=[self.major_axis, self.minor_axis], labels=[major_labels, minor_labels], verify_integrity=False) df = DataFrame(values, index=index, columns=self.items) return df.sortlevel(level=0) to_long = deprecate('to_long', to_frame) toLong = deprecate('toLong', to_frame) def reindex(self, major=None, items=None, minor=None, major_axis=None, minor_axis=None, copy=False): """ Conform / reshape panel axis labels to new input labels Parameters ---------- major : array-like, default None items : array-like, default None minor : array-like, default None copy : boolean, default False Copy underlying SparseDataFrame objects Returns ------- reindexed : SparsePanel """ major = com._mut_exclusive(major=major, major_axis=major_axis) minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis) if com._all_none(items, major, minor): raise ValueError('Must specify at least one axis') major = self.major_axis if major is None else major minor = self.minor_axis if minor is None else minor if items is not None: new_frames = {} for item in items: if item in self._frames: new_frames[item] = 
self._frames[item] else: raise NotImplementedError('Reindexing with new items not ' 'yet supported') else: new_frames = self._frames if copy: new_frames = dict((k, v.copy()) for k, v in compat.iteritems(new_frames)) return SparsePanel(new_frames, items=items, major_axis=major, minor_axis=minor, default_fill_value=self.default_fill_value, default_kind=self.default_kind) def _combine(self, other, func, axis=0): if isinstance(other, DataFrame): return self._combineFrame(other, func, axis=axis) elif isinstance(other, Panel): return self._combinePanel(other, func) elif lib.isscalar(other): new_frames = dict((k, func(v, other)) for k, v in self.iteritems()) return self._new_like(new_frames) def _combineFrame(self, other, func, axis=0): index, columns = self._get_plane_axes(axis) axis = self._get_axis_number(axis) other = other.reindex(index=index, columns=columns) if axis == 0: new_values = func(self.values, other.values) elif axis == 1: new_values = func(self.values.swapaxes(0, 1), other.values.T) new_values = new_values.swapaxes(0, 1) elif axis == 2: new_values = func(self.values.swapaxes(0, 2), other.values) new_values = new_values.swapaxes(0, 2) # TODO: make faster! new_frames = {} for item, item_slice in zip(self.items, new_values): old_frame = self[item] ofv = old_frame.default_fill_value ok = old_frame.default_kind new_frames[item] = SparseDataFrame(item_slice, index=self.major_axis, columns=self.minor_axis, default_fill_value=ofv, default_kind=ok) return self._new_like(new_frames) def _new_like(self, new_frames): return SparsePanel(new_frames, self.items, self.major_axis, self.minor_axis, default_fill_value=self.default_fill_value, default_kind=self.default_kind) def _combinePanel(self, other, func): items = self.items.union(other.items) major = self.major_axis.union(other.major_axis) minor = self.minor_axis.union(other.minor_axis) # could check that everything's the same size, but forget it this = self.reindex(items=items, major=major, minor=minor) other = other.reindex(items=items, major=major, minor=minor) new_frames = {} for item in items: new_frames[item] = func(this[item], other[item]) if not isinstance(other, SparsePanel): new_default_fill = self.default_fill_value else: # maybe unnecessary new_default_fill = func(self.default_fill_value, other.default_fill_value) return SparsePanel(new_frames, items, major, minor, default_fill_value=new_default_fill, default_kind=self.default_kind) def major_xs(self, key): """ Return slice of panel along major axis Parameters ---------- key : object Major axis label Returns ------- y : DataFrame index -> minor axis, columns -> items """ slices = dict((k, v.xs(key)) for k, v in self.iteritems()) return DataFrame(slices, index=self.minor_axis, columns=self.items) def minor_xs(self, key): """ Return slice of panel along minor axis Parameters ---------- key : object Minor axis label Returns ------- y : SparseDataFrame index -> major axis, columns -> items """ slices = dict((k, v[key]) for k, v in self.iteritems()) return SparseDataFrame(slices, index=self.major_axis, columns=self.items, default_fill_value=self.default_fill_value, default_kind=self.default_kind) # TODO: allow SparsePanel to work with flex arithmetic. 
# pow and mod only work for scalars for now def pow(self, val, *args, **kwargs): """wrapper around `__pow__` (only works for scalar values)""" return self.__pow__(val) def mod(self, val, *args, **kwargs): """wrapper around `__mod__` (only works for scalar values""" return self.__mod__(val) # Sparse objects opt out of numexpr SparsePanel._add_aggregate_operations(use_numexpr=False) ops.add_special_arithmetic_methods(SparsePanel, use_numexpr=False, ** ops.panel_special_funcs) SparseWidePanel = SparsePanel def _convert_frames(frames, index, columns, fill_value=np.nan, kind='block'): from pandas.core.panel import _get_combined_index output = {} for item, df in compat.iteritems(frames): if not isinstance(df, SparseDataFrame): df = SparseDataFrame(df, default_kind=kind, default_fill_value=fill_value) output[item] = df if index is None: all_indexes = [x.index for x in output.values()] index = _get_combined_index(all_indexes) if columns is None: all_columns = [x.columns for x in output.values()] columns = _get_combined_index(all_columns) index = _ensure_index(index) columns = _ensure_index(columns) for item, df in compat.iteritems(output): if not (df.index.equals(index) and df.columns.equals(columns)): output[item] = df.reindex(index=index, columns=columns) return output, index, columns def _stack_sparse_info(frame): lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)] # this is pretty fast minor_labels = np.repeat(np.arange(len(frame.columns)), lengths) inds_to_concat = [] vals_to_concat = [] for col in frame.columns: series = frame[col] if not np.isnan(series.fill_value): raise TypeError('This routine assumes NaN fill value') int_index = series.sp_index.to_int_index() inds_to_concat.append(int_index.indices) vals_to_concat.append(series.sp_values) major_labels = np.concatenate(inds_to_concat) sparse_values = np.concatenate(vals_to_concat) return sparse_values, major_labels, minor_labels
mit
bthirion/scikit-learn
doc/conf.py
10
9807
# -*- coding: utf-8 -*- # # scikit-learn documentation build configuration file, created by # sphinx-quickstart on Fri Jan 8 09:13:42 2010. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from __future__ import print_function import sys import os from sklearn.externals.six import u # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.insert(0, os.path.abspath('sphinxext')) from github_link import make_linkcode_resolve import sphinx_gallery # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'numpy_ext.numpydoc', 'sphinx.ext.linkcode', 'sphinx.ext.doctest', 'sphinx_gallery.gen_gallery', 'sphinx_issues', ] # pngmath / imgmath compatibility layer for different sphinx versions import sphinx from distutils.version import LooseVersion if LooseVersion(sphinx.__version__) < LooseVersion('1.4'): extensions.append('sphinx.ext.pngmath') else: extensions.append('sphinx.ext.imgmath') autodoc_default_flags = ['members', 'inherited-members'] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # generate autosummary even if no references autosummary_generate = True # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # Generate the plots for the gallery plot_gallery = True # The master toctree document. master_doc = 'index' # General information about the project. project = u('scikit-learn') copyright = u('2007 - 2017, scikit-learn developers (BSD License)') # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. import sklearn version = sklearn.__version__ # The full version, including alpha/beta/rc tags. release = sklearn.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be # searched for source files. exclude_trees = ['_build', 'templates', 'includes'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. 
#show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'scikit-learn' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {'oldversion': False, 'collapsiblesidebar': True, 'google_analytics': True, 'surveybanner': False, 'sprintbanner': True} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = 'scikit-learn' # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = 'logos/scikit-learn-logo-small.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'logos/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['images'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'scikit-learndoc' # -- Options for LaTeX output ------------------------------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'), u('scikit-learn developers'), 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
latex_logo = "logos/scikit-learn-logo.png" # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. latex_preamble = r""" \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats} \usepackage{enumitem} \setlistdepth{10} """ # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. latex_domain_indices = False trim_doctests_flags = True sphinx_gallery_conf = { 'doc_module': 'sklearn', 'backreferences_dir': os.path.join('modules', 'generated'), 'reference_url': { 'sklearn': None, 'matplotlib': 'http://matplotlib.org', 'numpy': 'http://docs.scipy.org/doc/numpy-1.8.1', 'scipy': 'http://docs.scipy.org/doc/scipy-0.13.3/reference'} } # The following dictionary contains the information used to create the # thumbnails for the front page of the scikit-learn home page. # key: first image in set # values: (number of plot in set, height of thumbnail) carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600, 'sphx_glr_plot_outlier_detection_003.png': 372, 'sphx_glr_plot_gpr_co2_001.png': 350, 'sphx_glr_plot_adaboost_twoclass_001.png': 372, 'sphx_glr_plot_compare_methods_001.png': 349} def make_carousel_thumbs(app, exception): """produces the final resized carousel images""" if exception is not None: return print('Preparing carousel images') image_dir = os.path.join(app.builder.outdir, '_images') for glr_plot, max_width in carousel_thumbs.items(): image = os.path.join(image_dir, glr_plot) if os.path.exists(image): c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png') sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190) # Config for sphinx_issues issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}' issues_github_path = 'scikit-learn/scikit-learn' issues_user_uri = 'https://github.com/{user}' def setup(app): # to hide/show the prompt in code examples: app.add_javascript('js/copybutton.js') app.connect('build-finished', make_carousel_thumbs) # The following is used by sphinx.ext.linkcode to provide links to github linkcode_resolve = make_linkcode_resolve('sklearn', u'https://github.com/scikit-learn/' 'scikit-learn/blob/{revision}/' '{package}/{path}#L{lineno}')
bsd-3-clause
chrsrds/scikit-learn
examples/plot_isotonic_regression.py
8
1764
""" =================== Isotonic Regression =================== An illustration of the isotonic regression on generated data. The isotonic regression finds a non-decreasing approximation of a function while minimizing the mean squared error on the training data. The benefit of such a model is that it does not assume any form for the target function such as linearity. For comparison a linear regression is also presented. """ print(__doc__) # Author: Nelle Varoquaux <[email protected]> # Alexandre Gramfort <[email protected]> # License: BSD import numpy as np import matplotlib.pyplot as plt from matplotlib.collections import LineCollection from sklearn.linear_model import LinearRegression from sklearn.isotonic import IsotonicRegression from sklearn.utils import check_random_state n = 100 x = np.arange(n) rs = check_random_state(0) y = rs.randint(-50, 50, size=(n,)) + 50. * np.log1p(np.arange(n)) # ############################################################################# # Fit IsotonicRegression and LinearRegression models ir = IsotonicRegression() y_ = ir.fit_transform(x, y) lr = LinearRegression() lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression # ############################################################################# # Plot result segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)] lc = LineCollection(segments, zorder=0) lc.set_array(np.ones(len(y))) lc.set_linewidths(np.full(n, 0.5)) fig = plt.figure() plt.plot(x, y, 'r.', markersize=12) plt.plot(x, y_, 'b.-', markersize=12) plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-') plt.gca().add_collection(lc) plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right') plt.title('Isotonic regression') plt.show()
bsd-3-clause
etkirsch/scikit-learn
sklearn/cluster/tests/test_affinity_propagation.py
341
2620
""" Testing for Clustering methods """ import numpy as np from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.cluster.affinity_propagation_ import AffinityPropagation from sklearn.cluster.affinity_propagation_ import affinity_propagation from sklearn.datasets.samples_generator import make_blobs from sklearn.metrics import euclidean_distances n_clusters = 3 centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 X, _ = make_blobs(n_samples=60, n_features=2, centers=centers, cluster_std=0.4, shuffle=True, random_state=0) def test_affinity_propagation(): # Affinity Propagation algorithm # Compute similarities S = -euclidean_distances(X, squared=True) preference = np.median(S) * 10 # Compute Affinity Propagation cluster_centers_indices, labels = affinity_propagation( S, preference=preference) n_clusters_ = len(cluster_centers_indices) assert_equal(n_clusters, n_clusters_) af = AffinityPropagation(preference=preference, affinity="precomputed") labels_precomputed = af.fit(S).labels_ af = AffinityPropagation(preference=preference, verbose=True) labels = af.fit(X).labels_ assert_array_equal(labels, labels_precomputed) cluster_centers_indices = af.cluster_centers_indices_ n_clusters_ = len(cluster_centers_indices) assert_equal(np.unique(labels).size, n_clusters_) assert_equal(n_clusters, n_clusters_) # Test also with no copy _, labels_no_copy = affinity_propagation(S, preference=preference, copy=False) assert_array_equal(labels, labels_no_copy) # Test input validation assert_raises(ValueError, affinity_propagation, S[:, :-1]) assert_raises(ValueError, affinity_propagation, S, damping=0) af = AffinityPropagation(affinity="unknown") assert_raises(ValueError, af.fit, X) def test_affinity_propagation_predict(): # Test AffinityPropagation.predict af = AffinityPropagation(affinity="euclidean") labels = af.fit_predict(X) labels2 = af.predict(X) assert_array_equal(labels, labels2) def test_affinity_propagation_predict_error(): # Test exception in AffinityPropagation.predict # Not fitted. af = AffinityPropagation(affinity="euclidean") assert_raises(ValueError, af.predict, X) # Predict not supported when affinity="precomputed". S = np.dot(X, X.T) af = AffinityPropagation(affinity="precomputed") af.fit(S) assert_raises(ValueError, af.predict, X)
bsd-3-clause
QuLogic/cartopy
examples/gridlines_and_labels/tick_labels.py
6
1770
""" Tick Labels ----------- This example demonstrates adding tick labels to maps on rectangular projections using special tick formatters. """ import cartopy.crs as ccrs from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter import matplotlib.pyplot as plt def main(): fig = plt.figure(figsize=(8, 10)) # Label axes of a Plate Carree projection with a central longitude of 180: ax1 = fig.add_subplot(2, 1, 1, projection=ccrs.PlateCarree(central_longitude=180)) ax1.set_global() ax1.coastlines() ax1.set_xticks([0, 60, 120, 180, 240, 300, 360], crs=ccrs.PlateCarree()) ax1.set_yticks([-90, -60, -30, 0, 30, 60, 90], crs=ccrs.PlateCarree()) lon_formatter = LongitudeFormatter(zero_direction_label=True) lat_formatter = LatitudeFormatter() ax1.xaxis.set_major_formatter(lon_formatter) ax1.yaxis.set_major_formatter(lat_formatter) # Label axes of a Mercator projection without degree symbols in the labels # and formatting labels to include 1 decimal place: ax2 = fig.add_subplot(2, 1, 2, projection=ccrs.Mercator()) ax2.set_global() ax2.coastlines() ax2.set_xticks([-180, -120, -60, 0, 60, 120, 180], crs=ccrs.PlateCarree()) ax2.set_yticks([-78.5, -60, -25.5, 25.5, 60, 80], crs=ccrs.PlateCarree()) lon_formatter = LongitudeFormatter(number_format='.1f', degree_symbol='', dateline_direction_label=True) lat_formatter = LatitudeFormatter(number_format='.1f', degree_symbol='') ax2.xaxis.set_major_formatter(lon_formatter) ax2.yaxis.set_major_formatter(lat_formatter) plt.show() if __name__ == '__main__': main()
lgpl-3.0
equialgo/scikit-learn
examples/covariance/plot_outlier_detection.py
36
5023
""" ========================================== Outlier detection with several methods. ========================================== When the amount of contamination is known, this example illustrates three different ways of performing :ref:`outlier_detection`: - based on a robust estimator of covariance, which is assuming that the data are Gaussian distributed and performs better than the One-Class SVM in that case. - using the One-Class SVM and its ability to capture the shape of the data set, hence performing better when the data is strongly non-Gaussian, i.e. with two well-separated clusters; - using the Isolation Forest algorithm, which is based on random forests and hence more adapted to large-dimensional settings, even if it performs quite well in the examples below. - using the Local Outlier Factor to measure the local deviation of a given data point with respect to its neighbors by comparing their local density. The ground truth about inliers and outliers is given by the points colors while the orange-filled area indicates which points are reported as inliers by each method. Here, we assume that we know the fraction of outliers in the datasets. Thus rather than using the 'predict' method of the objects, we set the threshold on the decision_function to separate out the corresponding fraction. """ import numpy as np from scipy import stats import matplotlib.pyplot as plt import matplotlib.font_manager from sklearn import svm from sklearn.covariance import EllipticEnvelope from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor print(__doc__) rng = np.random.RandomState(42) # Example settings n_samples = 200 outliers_fraction = 0.25 clusters_separation = [0, 1, 2] # define two outlier detection tools to be compared classifiers = { "One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05, kernel="rbf", gamma=0.1), "Robust covariance": EllipticEnvelope(contamination=outliers_fraction), "Isolation Forest": IsolationForest(max_samples=n_samples, contamination=outliers_fraction, random_state=rng), "Local Outlier Factor": LocalOutlierFactor( n_neighbors=35, contamination=outliers_fraction)} # Compare given classifiers under given settings xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100)) n_inliers = int((1. 
- outliers_fraction) * n_samples) n_outliers = int(outliers_fraction * n_samples) ground_truth = np.ones(n_samples, dtype=int) ground_truth[-n_outliers:] = -1 # Fit the problem with varying cluster separation for i, offset in enumerate(clusters_separation): np.random.seed(42) # Data generation X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset X = np.r_[X1, X2] # Add outliers X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))] # Fit the model plt.figure(figsize=(9, 7)) for i, (clf_name, clf) in enumerate(classifiers.items()): # fit the data and tag outliers if clf_name == "Local Outlier Factor": y_pred = clf.fit_predict(X) scores_pred = clf.negative_outlier_factor_ else: clf.fit(X) scores_pred = clf.decision_function(X) y_pred = clf.predict(X) threshold = stats.scoreatpercentile(scores_pred, 100 * outliers_fraction) n_errors = (y_pred != ground_truth).sum() # plot the levels lines and the points if clf_name == "Local Outlier Factor": # decision_function is private for LOF Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()]) else: Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) subplot = plt.subplot(2, 2, i + 1) subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7), cmap=plt.cm.Blues_r) a = subplot.contour(xx, yy, Z, levels=[threshold], linewidths=2, colors='red') subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()], colors='orange') b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white') c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black') subplot.axis('tight') subplot.legend( [a.collections[0], b, c], ['learned decision function', 'true inliers', 'true outliers'], prop=matplotlib.font_manager.FontProperties(size=10), loc='lower right') subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors)) subplot.set_xlim((-7, 7)) subplot.set_ylim((-7, 7)) plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26) plt.suptitle("Outlier detection") plt.show()
bsd-3-clause
scottpurdy/NAB
nab/runner.py
3
10414
# ---------------------------------------------------------------------- # Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import multiprocessing import os import pandas try: import simplejson as json except ImportError: import json from nab.corpus import Corpus from nab.detectors.base import detectDataSet from nab.labeler import CorpusLabel from nab.optimizer import optimizeThreshold from nab.scorer import scoreCorpus from nab.util import updateThresholds, updateFinalResults class Runner(object): """ Class to run an endpoint (detect, optimize, or score) on the NAB benchmark using the specified set of profiles, thresholds, and/or detectors. """ def __init__(self, dataDir, resultsDir, labelPath, profilesPath, thresholdPath, numCPUs=None): """ @param dataDir (string) Directory where all the raw datasets exist. @param resultsDir (string) Directory where the detector anomaly scores will be scored. @param labelPath (string) Path where the labels of the datasets exist. @param profilesPath (string) Path to JSON file containing application profiles and associated cost matrices. @param thresholdPath (string) Path to thresholds dictionary containing the best thresholds (and their corresponding score) for a combination of detector and user profile. @probationaryPercent (float) Percent of each dataset which will be ignored during the scoring process. @param numCPUs (int) Number of CPUs to be used for calls to multiprocessing.pool.map """ self.dataDir = dataDir self.resultsDir = resultsDir self.labelPath = labelPath self.profilesPath = profilesPath self.thresholdPath = thresholdPath self.pool = multiprocessing.Pool(numCPUs) self.probationaryPercent = 0.15 self.windowSize = 0.10 self.corpus = None self.corpusLabel = None self.profiles = None def initialize(self): """Initialize all the relevant objects for the run.""" self.corpus = Corpus(self.dataDir) self.corpusLabel = CorpusLabel(path=self.labelPath, corpus=self.corpus) with open(self.profilesPath) as p: self.profiles = json.load(p) def detect(self, detectors): """Generate results file given a dictionary of detector classes Function that takes a set of detectors and a corpus of data and creates a set of files storing the alerts and anomaly scores given by the detectors @param detectors (dict) Dictionary with key value pairs of a detector name and its corresponding class constructor. 
""" print "\nRunning detection step" count = 0 args = [] for detectorName, detectorConstructor in detectors.iteritems(): for relativePath, dataSet in self.corpus.dataFiles.iteritems(): if self.corpusLabel.labels.has_key(relativePath): args.append( ( count, detectorConstructor( dataSet=dataSet, probationaryPercent=self.probationaryPercent), detectorName, self.corpusLabel.labels[relativePath]["label"], self.resultsDir, relativePath ) ) count += 1 # Using `map_async` instead of `map` so interrupts are properly handled. # See: http://stackoverflow.com/a/1408476 self.pool.map_async(detectDataSet, args).get(99999999) def optimize(self, detectorNames): """Optimize the threshold for each combination of detector and profile. @param detectorNames (list) List of detector names. @return thresholds (dict) Dictionary of dictionaries with detector names then profile names as keys followed by another dictionary containing the score and the threshold used to obtained that score. """ print "\nRunning optimize step" scoreFlag = False thresholds = {} for detectorName in detectorNames: resultsDetectorDir = os.path.join(self.resultsDir, detectorName) resultsCorpus = Corpus(resultsDetectorDir) thresholds[detectorName] = {} for profileName, profile in self.profiles.iteritems(): thresholds[detectorName][profileName] = optimizeThreshold( (self.pool, detectorName, profileName, profile["CostMatrix"], resultsDetectorDir, resultsCorpus, self.corpusLabel, self.probationaryPercent, scoreFlag)) updateThresholds(thresholds, self.thresholdPath) return thresholds def score(self, detectorNames, thresholds): """Score the performance of the detectors. Function that must be called only after detection result files have been generated and thresholds have been optimized. This looks at the result files and scores the performance of each detector specified and stores these results in a csv file. @param detectorNames (list) List of detector names. @param thresholds (dict) Dictionary of dictionaries with detector names then profile names as keys followed by another dictionary containing the score and the threshold used to obtained that score. """ print "\nRunning scoring step" scoreFlag = True baselines = {} self.resultsFiles = [] for detectorName in detectorNames: resultsDetectorDir = os.path.join(self.resultsDir, detectorName) resultsCorpus = Corpus(resultsDetectorDir) for profileName, profile in self.profiles.iteritems(): threshold = thresholds[detectorName][profileName]["threshold"] resultsDF = scoreCorpus(threshold, (self.pool, detectorName, profileName, profile["CostMatrix"], resultsDetectorDir, resultsCorpus, self.corpusLabel, self.probationaryPercent, scoreFlag)) scorePath = os.path.join(resultsDetectorDir, "%s_%s_scores.csv" %\ (detectorName, profileName)) resultsDF.to_csv(scorePath, index=False) print "%s detector benchmark scores written to %s" %\ (detectorName, scorePath) self.resultsFiles.append(scorePath) def normalize(self): """ Normalize the detectors' scores according to the baseline defined by the null detector, and print to the console. Function can only be called with the scoring step (i.e. runner.score()) preceding it. This reads the total score values from the results CSVs, and subtracts the relevant baseline value. The scores are then normalized by multiplying by 100 and dividing by perfect less the baseline, where the perfect score is the number of TPs possible. Note the results CSVs still contain the original scores, not normalized. 
""" print "\nRunning score normalization step" # Get baseline scores for each application profile. nullDir = os.path.join(self.resultsDir, "null") if not os.path.isdir(nullDir): raise IOError("No results directory for null detector. You must " "run the null detector before normalizing scores.") baselines = {} for profileName, _ in self.profiles.iteritems(): fileName = os.path.join(nullDir, "null_" + profileName + "_scores.csv") with open(fileName) as f: results = pandas.read_csv(f) baselines[profileName] = results["Score"].iloc[-1] # Get total number of TPs with open(self.labelPath, "rb") as f: labelsDict = json.load(f) tpCount = 0 for labels in labelsDict.values(): tpCount += len(labels) # Normalize the score from each results file. finalResults = {} for resultsFile in self.resultsFiles: profileName = [k for k in baselines.keys() if k in resultsFile][0] base = baselines[profileName] with open(resultsFile) as f: results = pandas.read_csv(f) # Calculate score: perfect = tpCount * self.profiles[profileName]["CostMatrix"]["tpWeight"] score = 100 * (results["Score"].iloc[-1] - base) / (perfect - base) # Add to results dict: resultsInfo = resultsFile.split(os.path.sep)[-1].split('.')[0] detector = resultsInfo.split('_')[0] profile = resultsInfo.replace(detector + "_", "").replace("_scores", "") if detector not in finalResults: finalResults[detector] = {} finalResults[detector][profile] = score print ("Final score for \'%s\' detector on \'%s\' profile = %.2f" % (detector, profile, score)) resultsPath = os.path.join(self.resultsDir, "final_results.json") updateFinalResults(finalResults, resultsPath) print "Final scores have been written to %s." % resultsPath
agpl-3.0
tdent/pycbc
bin/hdfcoinc/pycbc_plot_Nth_loudest_coinc_omicron.py
10
6303
""" Generates a plot that shows the time-frequency trace of Nth loudest coincident trigger overlaid on a background of Omicron triggers. """ import logging import h5py import numpy as np import argparse import glob from glue.ligolw import ligolw, lsctables, table, utils import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pycbc.events from pycbc.waveform import get_td_waveform, frequency_from_polarizations, amplitude_from_polarizations logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO) class DefaultContentHandler(ligolw.LIGOLWContentHandler): pass lsctables.use_in(DefaultContentHandler) parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--coinc-file', type=str, required=True, help='HDF file containing coincident CBC triggers') parser.add_argument('--single-ifo-trigs', type=str, required=True, help='HDF file containing single IFO CBC triggers') parser.add_argument('--ifo', type=str, required=True, help='IFO, L1 or H1') parser.add_argument('--tmpltbank-file', type=str, required=True, help='HDF file containing template information for CBC search') parser.add_argument('--output-file', type=str, required=True, help='Full path to output file') parser.add_argument('--loudest-event-number', type=int, required=True, default=1, help='Script will plot the Nth loudest coincident trigger') parser.add_argument('--omicron-dir', type=str, required=True, help='Directory containing Omicron triggers. Ex: /home/detchar/triggers/ER7/') parser.add_argument('--omicron-snr-thresh', type=int, required=False, default=5, help='SNR threshold for choosing which Omicron triggers to plot.') parser.add_argument('--plot-window', type=float, required=False, default=32, help='Time window to plot around CBC trigger') parser.add_argument('--omicron-channel',type=str, required=False, default='GDS-CALIB_STRAIN', help='Channel to plot Omicron triggers for, do not include IFO') parser.add_argument('--analysis-level', type=str, required=False, default='foreground', choices = ['foreground','background','background_exc'], help='Designates which level of the analysis output to search') args = parser.parse_args() logging.info('Reading HDF files') coinc_trig_file = h5py.File(args.coinc_file,'r') single_trig_file = h5py.File(args.single_ifo_trigs,'r') template_file = h5py.File(args.tmpltbank_file,'r') logging.info('Parsing HDF files') coinc_newsnr = coinc_trig_file[args.analysis_level]['stat'][:] Nth_loudest_idx = np.argsort(coinc_newsnr)[-args.loudest_event_number] if coinc_trig_file.attrs['detector_1'] == args.ifo: idx = coinc_trig_file[args.analysis_level]['trigger_id1'][Nth_loudest_idx] else: idx = coinc_trig_file[args.analysis_level]['trigger_id2'][Nth_loudest_idx] # get info about single detector triggers that comprise loudest background event # and calculate newSNR snr = single_trig_file[args.ifo]['snr'][idx] chisq = single_trig_file[args.ifo]['chisq'][idx] chisq_dof = single_trig_file[args.ifo]['chisq_dof'][idx] reduced_chisq = chisq/(2*chisq_dof - 2) newsnr = pycbc.events.ranking.newsnr(snr,reduced_chisq) cbc_end_time = single_trig_file[args.ifo]['end_time'][idx] template_id = single_trig_file[args.ifo]['template_id'][idx] m1 = template_file['mass1'][template_id] m2 = template_file['mass2'][template_id] s1z = template_file['spin1z'][template_id] s2z = template_file['spin2z'][template_id] omicron_start_time = cbc_end_time - args.plot_window omicron_end_time = cbc_end_time + args.plot_window logging.info('Fetching omicron triggers') # Generate list of 
directories to search over gps_era_start = str(omicron_start_time)[:5] gps_era_end = str(omicron_end_time)[:5] eras = map(str,range(int(gps_era_start),int(gps_era_end))) if not eras: eras = [gps_era_start] # Grab all relevant Omicron trigger files omicron_times = [] omicron_snr = [] omicron_freq = [] for era in eras: # Generate list of all Omicron SnglBurst xml trigger files file_list = glob.glob(args.omicron_dir + '/%s/%s_Omicron/%s/%s-%s_Omicron-*.xml.gz' %(args.ifo,args.omicron_channel,era,args.ifo,args.omicron_channel.replace('-','_'))) # Parse trigger files into SNR, time, and frequency for Omicron triggers for file_name in file_list: omicron_xml = utils.load_filename(file_name, contenthandler=DefaultContentHandler) snglburst_table = table.get_table(omicron_xml, lsctables.SnglBurstTable.tableName) for row in snglburst_table: if (row.snr > args.omicron_snr_thresh and omicron_start_time < row.peak_time < omicron_end_time): omicron_times.append(row.peak_time + row.peak_time_ns * 10**(-9)) omicron_snr.append(row.snr) omicron_freq.append(row.peak_frequency) # Generate inspiral waveform and calculate f(t) to plot on top of Omicron triggers hp, hc = get_td_waveform(approximant='SEOBNRv2', mass1=m1, mass2=m2, spin1x=0, spin1y=0, spin1z=s1z, spin2x=0, spin2y=0, spin2z=s2z, delta_t=(1./32768.), f_lower=30) f = frequency_from_polarizations(hp, hc) amp = amplitude_from_polarizations(hp, hc) stop_idx = amp.abs_max_loc()[1] f = f[:stop_idx] freq = np.array(f.data) times = np.array(f.sample_times) + cbc_end_time logging.info('Plotting') plt.figure(0) cm = plt.cm.get_cmap('Reds') plt.scatter(omicron_times,omicron_freq,c=omicron_snr,s=30,cmap=cm,linewidth=0) plt.grid(b=True, which='both') cbar = plt.colorbar() cbar.set_label('%s Omicron trigger SNR' % (args.ifo)) plt.yscale('log') plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') plt.xlim(omicron_start_time,omicron_end_time) plt.suptitle('%s CBC trigger SNR = ' % (args.ifo) + format(snr,'.2f') + ", newSNR = " + format(newsnr,'.2f'),fontsize=12) plt.title(format(m1,'.2f') + " - " + format(m2,'.2f') + " solar masses at GPS time " + format(cbc_end_time,'.2f'),fontsize=12) plt.hold(True) plt.plot(times,freq) plt.savefig(args.output_file) logging.info('Done! Exiting script.')
gpl-3.0
AIML/scikit-learn
examples/neighbors/plot_regression.py
349
1402
""" ============================ Nearest Neighbors regression ============================ Demonstrate the resolution of a regression problem using a k-Nearest Neighbor and the interpolation of the target using both barycenter and constant weights. """ print(__doc__) # Author: Alexandre Gramfort <[email protected]> # Fabian Pedregosa <[email protected]> # # License: BSD 3 clause (C) INRIA ############################################################################### # Generate sample data import numpy as np import matplotlib.pyplot as plt from sklearn import neighbors np.random.seed(0) X = np.sort(5 * np.random.rand(40, 1), axis=0) T = np.linspace(0, 5, 500)[:, np.newaxis] y = np.sin(X).ravel() # Add noise to targets y[::5] += 1 * (0.5 - np.random.rand(8)) ############################################################################### # Fit regression model n_neighbors = 5 for i, weights in enumerate(['uniform', 'distance']): knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights) y_ = knn.fit(X, y).predict(T) plt.subplot(2, 1, i + 1) plt.scatter(X, y, c='k', label='data') plt.plot(T, y_, c='g', label='prediction') plt.axis('tight') plt.legend() plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors, weights)) plt.show()
bsd-3-clause
PrashntS/scikit-learn
examples/cluster/plot_dbscan.py
346
2479
# -*- coding: utf-8 -*- """ =================================== Demo of DBSCAN clustering algorithm =================================== Finds core samples of high density and expands clusters from them. """ print(__doc__) import numpy as np from sklearn.cluster import DBSCAN from sklearn import metrics from sklearn.datasets.samples_generator import make_blobs from sklearn.preprocessing import StandardScaler ############################################################################## # Generate sample data centers = [[1, 1], [-1, -1], [1, -1]] X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4, random_state=0) X = StandardScaler().fit_transform(X) ############################################################################## # Compute DBSCAN db = DBSCAN(eps=0.3, min_samples=10).fit(X) core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True labels = db.labels_ # Number of clusters in labels, ignoring noise if present. n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) print('Estimated number of clusters: %d' % n_clusters_) print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)) print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)) print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)) print("Adjusted Rand Index: %0.3f" % metrics.adjusted_rand_score(labels_true, labels)) print("Adjusted Mutual Information: %0.3f" % metrics.adjusted_mutual_info_score(labels_true, labels)) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, labels)) ############################################################################## # Plot result import matplotlib.pyplot as plt # Black removed and is used for noise instead. unique_labels = set(labels) colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels))) for k, col in zip(unique_labels, colors): if k == -1: # Black used for noise. col = 'k' class_member_mask = (labels == k) xy = X[class_member_mask & core_samples_mask] plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=14) xy = X[class_member_mask & ~core_samples_mask] plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6) plt.title('Estimated number of clusters: %d' % n_clusters_) plt.show()
bsd-3-clause