harikrushna2272/github_code_codeparrot
Text Generation • 0.1B • Updated • 3
| repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes) |
|---|---|---|---|---|---|
| kmike/scikit-learn | sklearn/utils/__init__.py | 3 | 10094 |
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array, check_arrays, safe_asarray,
                         assert_all_finite, array2d, atleast2d_or_csc,
                         atleast2d_or_csr, warn_if_not_float,
                         check_random_state)
from .class_weight import compute_class_weight
__all__ = ["murmurhash3_32", "as_float_array", "check_arrays", "safe_asarray",
           "assert_all_finite", "array2d", "atleast2d_or_csc",
           "atleast2d_or_csr", "warn_if_not_float", "check_random_state",
           "compute_class_weight"]
# Make sure that DeprecationWarning always gets printed
warnings.simplefilter("always", DeprecationWarning)
class deprecated(object):
    """Decorator to mark a function or class as deprecated.
    Issue a warning when the function is called/the class is instantiated and
    add a warning to the docstring.
    The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
    >>> from sklearn.utils import deprecated
    >>> deprecated() # doctest: +ELLIPSIS
    <sklearn.utils.deprecated object at ...>
    >>> @deprecated()
    ... def some_function(): pass
    """
    # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
    # but with many changes.
    def __init__(self, extra=''):
        """
        Parameters
        ----------
        extra: string
          to be added to the deprecation messages
        """
        self.extra = extra
    def __call__(self, obj):
        if isinstance(obj, type):
            return self._decorate_class(obj)
        else:
            return self._decorate_fun(obj)
    def _decorate_class(self, cls):
        msg = "Class %s is deprecated" % cls.__name__
        if self.extra:
            msg += "; %s" % self.extra
        # FIXME: we should probably reset __new__ for full generality
        init = cls.__init__
        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return init(*args, **kwargs)
        cls.__init__ = wrapped
        wrapped.__name__ = '__init__'
        wrapped.__doc__ = self._update_doc(init.__doc__)
        wrapped.deprecated_original = init
        return cls
    def _decorate_fun(self, fun):
        """Decorate function fun"""
        msg = "Function %s is deprecated" % fun.__name__
        if self.extra:
            msg += "; %s" % self.extra
        def wrapped(*args, **kwargs):
            warnings.warn(msg, category=DeprecationWarning)
            return fun(*args, **kwargs)
        wrapped.__name__ = fun.__name__
        wrapped.__dict__ = fun.__dict__
        wrapped.__doc__ = self._update_doc(fun.__doc__)
        return wrapped
    def _update_doc(self, olddoc):
        newdoc = "DEPRECATED"
        if self.extra:
            newdoc = "%s: %s" % (newdoc, self.extra)
        if olddoc:
            newdoc = "%s\n\n%s" % (newdoc, olddoc)
        return newdoc
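# A minimal usage sketch of the decorator above (hypothetical function name):
# calling the wrapped function emits a DeprecationWarning and the docstring
# gains a "DEPRECATED" prefix.
#
#     >>> @deprecated("use new_function instead")
#     ... def old_function():
#     ...     """Do something."""
#     ...     return 42
#     >>> old_function()  # warns "Function old_function is deprecated; ..."
#     42
#     >>> old_function.__doc__.startswith("DEPRECATED: use new_function instead")
#     True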
def safe_mask(X, mask):
    """Return a mask which is safe to use on X.
    Parameters
    ----------
    X : {array-like, sparse matrix}
        Data on which to apply mask.
    mask: array
        Mask to be used on X.
    Returns
    -------
    mask : array
        The mask, converted to an array of integer indices when ``X`` is
        sparse.
    """
    mask = np.asanyarray(mask)
    if np.issubdtype(mask.dtype, np.int):
        return mask
    if hasattr(X, "toarray"):
        ind = np.arange(mask.shape[0])
        mask = ind[mask]
    return mask
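# A minimal usage sketch (illustrative data): boolean masks cannot index
# scipy sparse matrices directly, so safe_mask converts the mask to integer
# indices when ``X`` is sparse and returns it unchanged otherwise.
#
#     >>> from scipy.sparse import csr_matrix
#     >>> X = np.arange(6).reshape(3, 2)
#     >>> mask = np.array([True, False, True])
#     >>> safe_mask(csr_matrix(X), mask)
#     array([0, 2])
#     >>> X[safe_mask(X, mask)].shape
#     (2, 2)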
def resample(*arrays, **options):
    """Resample arrays or sparse matrices in a consistent way
    The default strategy implements one step of the bootstrapping
    procedure.
    Parameters
    ----------
    `*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
    replace : boolean, True by default
        Implements resampling with replacement. If False, this will implement
        (sliced) random permutations.
    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.
    random_state : int or RandomState instance
        Control the shuffling for reproducible behavior.
    Returns
    -------
    Sequence of resampled views of the collections. The original arrays are
    not impacted.
    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::
      >>> X = [[1., 0.], [2., 1.], [0., 0.]]
      >>> y = np.array([0, 1, 2])
      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)
      >>> from sklearn.utils import resample
      >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
      >>> X
      array([[ 1.,  0.],
             [ 2.,  1.],
             [ 1.,  0.]])
      >>> X_sparse                   # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
      <3x2 sparse matrix of type '<... 'numpy.float64'>'
          with 4 stored elements in Compressed Sparse Row format>
      >>> X_sparse.toarray()
      array([[ 1.,  0.],
             [ 2.,  1.],
             [ 1.,  0.]])
      >>> y
      array([0, 1, 0])
      >>> resample(y, n_samples=2, random_state=0)
      array([0, 1])
    See also
    --------
    :class:`sklearn.cross_validation.Bootstrap`
    :func:`sklearn.utils.shuffle`
    """
    random_state = check_random_state(options.pop('random_state', None))
    replace = options.pop('replace', True)
    max_n_samples = options.pop('n_samples', None)
    if options:
        raise ValueError("Unexpected kw arguments: %r" % options.keys())
    if len(arrays) == 0:
        return None
    first = arrays[0]
    n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
    if max_n_samples is None:
        max_n_samples = n_samples
    if max_n_samples > n_samples:
        raise ValueError("Cannot sample %d out of arrays with dim %d" % (
            max_n_samples, n_samples))
    arrays = check_arrays(*arrays, sparse_format='csr')
    if replace:
        indices = random_state.randint(0, n_samples, size=(max_n_samples,))
    else:
        indices = np.arange(n_samples)
        random_state.shuffle(indices)
        indices = indices[:max_n_samples]
    resampled_arrays = []
    for array in arrays:
        array = array[indices]
        resampled_arrays.append(array)
    if len(resampled_arrays) == 1:
        # syntactic sugar for the unit argument case
        return resampled_arrays[0]
    else:
        return resampled_arrays
def shuffle(*arrays, **options):
    """Shuffle arrays or sparse matrices in a consistent way
    This is a convenience alias to ``resample(*arrays, replace=False)`` to do
    random permutations of the collections.
    Parameters
    ----------
    `*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
    random_state : int or RandomState instance
        Control the shuffling for reproducible behavior.
    n_samples : int, None by default
        Number of samples to generate. If left to None this is
        automatically set to the first dimension of the arrays.
    Returns
    -------
    Sequence of shuffled views of the collections. The original arrays are
    not impacted.
    Examples
    --------
    It is possible to mix sparse and dense arrays in the same run::
      >>> X = [[1., 0.], [2., 1.], [0., 0.]]
      >>> y = np.array([0, 1, 2])
      >>> from scipy.sparse import coo_matrix
      >>> X_sparse = coo_matrix(X)
      >>> from sklearn.utils import shuffle
      >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
      >>> X
      array([[ 0.,  0.],
             [ 2.,  1.],
             [ 1.,  0.]])
      >>> X_sparse                   # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
      <3x2 sparse matrix of type '<... 'numpy.float64'>'
          with 3 stored elements in Compressed Sparse Row format>
      >>> X_sparse.toarray()
      array([[ 0.,  0.],
             [ 2.,  1.],
             [ 1.,  0.]])
      >>> y
      array([2, 1, 0])
      >>> shuffle(y, n_samples=2, random_state=0)
      array([0, 1])
    See also
    --------
    :func:`sklearn.utils.resample`
    """
    options['replace'] = False
    return resample(*arrays, **options)
def safe_sqr(X, copy=True):
    """Element wise squaring of array-likes and sparse matrices.
    Parameters
    ----------
    X : array like, matrix, sparse matrix
    Returns
    -------
    X ** 2 : element-wise square
    """
    X = safe_asarray(X)
    if issparse(X):
        if copy:
            X = X.copy()
        X.data **= 2
    else:
        if copy:
            X = X ** 2
        else:
            X **= 2
    return X
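# A short illustrative sketch (made-up values): safe_sqr squares dense inputs
# directly and squares only the ``.data`` of sparse inputs, so the sparsity
# pattern is preserved without densifying.
#
#     >>> from scipy.sparse import csr_matrix
#     >>> safe_sqr(np.array([-2., 3.]))
#     array([ 4.,  9.])
#     >>> safe_sqr(csr_matrix([[0., -2.], [3., 0.]])).toarray()
#     array([[ 0.,  4.],
#            [ 9.,  0.]])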
def gen_even_slices(n, n_packs):
    """Generator to create n_packs slices going up to n.
    Examples
    --------
    >>> from sklearn.utils import gen_even_slices
    >>> list(gen_even_slices(10, 1))
    [slice(0, 10, None)]
    >>> list(gen_even_slices(10, 10))                     #doctest: +ELLIPSIS
    [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
    >>> list(gen_even_slices(10, 5))                      #doctest: +ELLIPSIS
    [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
    >>> list(gen_even_slices(10, 3))
    [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
    """
    start = 0
    for pack_num in range(n_packs):
        this_n = n // n_packs
        if pack_num < n % n_packs:
            this_n += 1
        if this_n > 0:
            end = start + this_n
            yield slice(start, end, None)
            start = end
def tosequence(x):
    """Cast iterable x to a Sequence, avoiding a copy if possible."""
    if isinstance(x, np.ndarray):
        return np.asarray(x)
    elif isinstance(x, Sequence):
        return x
    else:
        return list(x)
class ConvergenceWarning(Warning):
    "Custom warning to capture convergence problems"
| bsd-3-clause |
| mne-tools/mne-tools.github.io | 0.20/_downloads/76822bb92a8465181ec2a7ee96ca8cf4/plot_decoding_csp_timefreq.py | 1 | 6457 |
"""
============================================================================
Decoding in time-frequency space data using the Common Spatial Pattern (CSP)
============================================================================
The time-frequency decomposition is estimated by iterating over raw data that
has been band-passed at different frequencies. This is used to compute a
covariance matrix over each epoch or a rolling time-window and extract the CSP
filtered signals. A linear discriminant classifier is then applied to these
signals.
"""
# Authors: Laura Gwilliams <laura.gwilliams@nyu.edu>
#          Jean-Remi King <jeanremi.king@gmail.com>
#          Alex Barachant <alexandre.barachant@gmail.com>
#          Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, create_info, events_from_annotations
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
from mne.time_frequency import AverageTFR
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
###############################################################################
# Set parameters and read data
event_id = dict(hands=2, feet=3)  # motor imagery: hands vs feet
subject = 1
runs = [6, 10, 14]
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])
# Extract information from the raw file
sfreq = raw.info['sfreq']
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads')
# Assemble the classifier using scikit-learn pipeline
clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False),
                    LinearDiscriminantAnalysis())
n_splits = 5  # how many folds to use for cross-validation
cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
# Classification & Time-frequency parameters
tmin, tmax = -.200, 2.000
n_cycles = 10.  # how many complete cycles: used to define window size
min_freq = 5.
max_freq = 25.
n_freqs = 8  # how many frequency bins to use
# Assemble list of frequency range tuples
freqs = np.linspace(min_freq, max_freq, n_freqs)  # assemble frequencies
freq_ranges = list(zip(freqs[:-1], freqs[1:]))  # make freqs list of tuples
# Infer window spacing from the max freq and number of cycles to avoid gaps
window_spacing = (n_cycles / np.max(freqs) / 2.)
centered_w_times = np.arange(tmin, tmax, window_spacing)[1:]
n_windows = len(centered_w_times)
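# With the parameters above this gives 7 frequency bands of width 20 / 7, i.e.
# roughly 2.86 Hz, between 5 and 25 Hz, and a sliding-window spacing of
# n_cycles / (2 * max_freq) = 10 / 50 = 0.2 s across the -0.2 to 2.0 s epoch
# (numbers derived by hand from the settings above, not from MNE output).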
# Instantiate label encoder
le = LabelEncoder()
###############################################################################
# Loop through frequencies, apply classifier and save scores
# init scores
freq_scores = np.zeros((n_freqs - 1,))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
    # Infer window size based on the frequency being used
    w_size = n_cycles / ((fmax + fmin) / 2.)  # in seconds
    # Apply band-pass filter to isolate the specified frequencies
    raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
                                   skip_by_annotation='edge')
    # Extract epochs from filtered data, padded by window size
    epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
                    proj=False, baseline=None, preload=True)
    epochs.drop_bad()
    y = le.fit_transform(epochs.events[:, 2])
    X = epochs.get_data()
    # Save mean scores over folds for each frequency and time window
    freq_scores[freq] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
                                                scoring='roc_auc', cv=cv,
                                                n_jobs=1), axis=0)
###############################################################################
# Plot frequency results
plt.bar(freqs[:-1], freq_scores, width=np.diff(freqs)[0],
        align='edge', edgecolor='black')
plt.xticks(freqs)
plt.ylim([0, 1])
plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--',
            label='chance level')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decoding Scores')
plt.title('Frequency Decoding Scores')
###############################################################################
# Loop through frequencies and time, apply classifier and save scores
# init scores
tf_scores = np.zeros((n_freqs - 1, n_windows))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
    # Infer window size based on the frequency being used
    w_size = n_cycles / ((fmax + fmin) / 2.)  # in seconds
    # Apply band-pass filter to isolate the specified frequencies
    raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
                                   skip_by_annotation='edge')
    # Extract epochs from filtered data, padded by window size
    epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
                    proj=False, baseline=None, preload=True)
    epochs.drop_bad()
    y = le.fit_transform(epochs.events[:, 2])
    # Roll covariance, csp and lda over time
    for t, w_time in enumerate(centered_w_times):
        # Center the min and max of the window
        w_tmin = w_time - w_size / 2.
        w_tmax = w_time + w_size / 2.
        # Crop data into time-window of interest
        X = epochs.copy().crop(w_tmin, w_tmax).get_data()
        # Save mean scores over folds for each frequency and time window
        tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
                                                     scoring='roc_auc', cv=cv,
                                                     n_jobs=1), axis=0)
###############################################################################
# Plot time-frequency results
# Set up time frequency object
av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :],
                    centered_w_times, freqs[1:], 1)
chance = np.mean(y)  # set chance level to white in the plot
av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores",
            cmap=plt.cm.Reds)
| bsd-3-clause |
| bijanfallah/OI_CCLM | src/RMSE_MAPS_INGO.py | 1 | 2007 |
# Program to show the maps of RMSE averaged over time
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import os
from netCDF4 import Dataset as NetCDFFile
import numpy as np
from CCLM_OUTS import Plot_CCLM
# option == 1 ->  shift 4 with default cclm domain and nboundlines = 3
# option == 2 ->  shift 4 with smaller cclm domain and nboundlines = 3
# option == 3 ->  shift 4 with smaller cclm domain and nboundlines = 6
# option == 4 ->  shift 4 with corrected smaller cclm domain and nboundlines = 3
# option == 5 ->  shift 4 with corrected smaller cclm domain and nboundlines = 4
# option == 6 ->  shift 4 with corrected smaller cclm domain and nboundlines = 6
# option == 7 ->  shift 4 with corrected smaller cclm domain and nboundlines = 9
# option == 8 ->  shift 4 with corrected bigger cclm domain and nboundlines = 3
from CCLM_OUTS import Plot_CCLM
#def f(x):
#   if x==-9999:
#      return float('NaN')
#   else:
#      return x
def read_data_from_mistral(dir='/work/bb1029/b324045/work1/work/member/post/',name='member_T_2M_ts_seasmean.nc',var='T_2M'):
    # type: (object, object, object) -> object
    # a function to read the data from the mistral 'work' directory
    """
    :rtype: object
    """
    #CMD = 'scp $mistral:' + dir + name + ' ./'
    CMD = 'wget users.met.fu-berlin.de/~BijanFallah/' + dir + name
    os.system(CMD)
    nc = NetCDFFile(name)
#    for name2, variable in nc.variables.items():
#        for attrname in variable.ncattrs():
#                    print(name2, variable, '-----------------',attrname)
#                    #print("{} -- {}".format(attrname, getattr(variable, attrname)))
    os.remove(name)
    lats = nc.variables['lat'][:]
    lons = nc.variables['lon'][:]
    t = nc.variables[var][:].squeeze()
    rlats = nc.variables['rlat'][:]  # extract/copy the data
    rlons = nc.variables['rlon'][:]
    #f2 = np.vectorize(f)
    #t= f2(t)
    #t=t.data
    t=t.squeeze()
    #print()
    nc.close()
    return(t, lats, lons, rlats, rlons)
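# A minimal usage sketch (argument values are simply the defaults of the
# function above; it assumes the FU-Berlin mirror used in CMD is reachable):
#
#     t, lats, lons, rlats, rlons = read_data_from_mistral(
#         dir='/work/bb1029/b324045/work1/work/member/post/',
#         name='member_T_2M_ts_seasmean.nc', var='T_2M')
#     print(t.shape, lats.shape, lons.shape)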
| mit |
| hsu/chrono | src/demos/trackVehicle/validationPlots_test_M113.py | 5 | 4229 |
# -*- coding: utf-8 -*-
"""
Created on Wed May 06 11:00:53 2015
@author: newJustin
"""
import ChronoTrack_pandas as CT
import pylab as py
      
if __name__ == '__main__':
    
    # logger
    import logging as lg
    
    lg.basicConfig(filename='logFile.log', level=lg.WARN, format='%(message)s')
    # default font size
    import matplotlib
    font = {'size' : 14}
    matplotlib.rc('font', **font)       
    
    #  **********************************************************************    
    #  ===============   USER INPUT   =======================================
    # data dir, end w/ '/'
    # data_dir = 'D:/Chrono_github_Build/bin/outdata_M113/'
    data_dir = 'E:/Chrono_github_Build/bin/outdata_M113/'    
    
    '''    
    # list of data files to plot
    chassis = 'M113_chassis.csv'
    gearSubsys = 'M113_Side0_gear.csv'
    idlerSubsys = 'M113_Side0_idler.csv'
    # ptrainSubsys = 'test_driveChain_ptrain.csv'
    shoe0 = 'M113_Side0_shoe0.csv'
    '''
    chassis = 'M113_400_200__chassis.csv'
    gearSubsys = 'M113_400_200__Side0_gear.csv'
    idlerSubsys = 'M113_400_200__Side0_idler.csv'
    # ptrainSubsys = 'test_driveChain_ptrain.csv'
    shoe0 = 'M113_400_200__Side0_shoe0.csv'    
    data_files = [data_dir + chassis, data_dir + gearSubsys, data_dir + idlerSubsys, data_dir + shoe0]
    handle_list = ['chassis','gear','idler','shoe0']
    # handle_list = ['Gear','idler','ptrain','shoe0','gearCV','idlerCV','rollerCV','gearContact','shoeGearContact']
    
    
    '''
    gearCV = 'test_driveChain_GearCV.csv'
    idlerCV = 'test_driveChain_idler0CV.csv'
    rollerCV = 'test_driveChain_roller0CV.csv'
    gearContact = 'test_driveChain_gearContact.csv'
    shoeGearContact = 'test_driveChain_shoe0GearContact.csv'
    '''
    
    # data_files = [data_dir + gearSubsys, data_dir + idlerSubsys, data_dir + ptrainSubsys, data_dir + shoe0, data_dir + gearCV, data_dir + idlerCV, data_dir + rollerCV, data_dir + gearContact, data_dir+shoeGearContact]
    # handle_list = ['Gear','idler','ptrain','shoe0','gearCV','idlerCV','rollerCV','gearContact','shoeGearContact']
    
    # list of data files for gear/pin comparison plots    
    #  Primitive gear geometry
    '''
    gear = 'driveChain_P_gear.csv'
    gearContact = 'driveChain_P_gearContact.csv'
    shoe = 'driveChain_P_shoe0.csv'
    shoeContact = 'driveChain_P_shoe0GearContact.csv'
    ptrain = 'driveChain_P_ptrain.csv'    
    
    
    #  Collision Callback gear geometry     
    gear = 'driveChain_CC_gear.csv'
    gearContact = 'driveChain_CC_gearContact.csv'
    shoe = 'driveChain_CC_shoe0.csv'
    shoeContact = 'driveChain_CC_shoe0GearContact.csv'
    ptrain = 'driveChain_CC_ptrain.csv'    
    
    
    data_files = [data_dir+gear, data_dir+gearContact, data_dir+shoe, data_dir+shoeContact, data_dir+ptrain]
   
    handle_list = ['Gear','gearContact','shoe0','shoeGearContact','ptrain']
    '''
 
 
    # construct the panda class for the DriveChain, file list and list of legend
    M113_Chain0 = CT.ChronoTrack_pandas(data_files, handle_list)
    
    # set the time limits. tmin = -1 will plot the entire time range
    tmin = 1.0
    tmax = 8.0
    
    
    #0) plot the chassis
    M113_Chain0.plot_chassis(tmin, tmax)    
    
    # 1) plot the gear body info
    M113_Chain0.plot_gear(tmin, tmax)
    
    
    # 2) plot idler body info, tensioner force
    M113_Chain0.plot_idler(tmin,tmax)
    '''
    # 3) plot powertrain info
    M113_Chain0.plot_ptrain()    
    '''
    
    # 4) plot shoe 0 body info, and pin 0 force/torque
    M113_Chain0.plot_shoe(tmin,tmax)
    
    '''
    # 5) plot gear Constraint Violations
    M113_Chain0.plot_gearCV(tmin,tmax)
    
    # 6) plot idler Constraint Violations
    M113_Chain0.plot_idlerCV(tmin,tmax)
    
    # 7) plot roller Constraint Violations
    M113_Chain0.plot_rollerCV(tmin,tmax)
    
    # 8) from the contact report callback function, gear contact info
    M113_Chain0.plot_gearContactInfo(tmin,tmax)
    # 9)  from shoe-gear report callback function, contact info
    M113_Chain0.plot_shoeGearContactInfo(tmin,tmax)
    '''
    
    # 10) track shoe trajectory: rel-X vs. rel-Y
    M113_Chain0.plot_trajectory(tmin,tmax)
    py.show()
| bsd-3-clause |
| lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/metrics/tests/test_score_objects.py | 15 | 17443 |
import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
                             log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
                                    _passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
                      'neg_mean_squared_error', 'neg_median_absolute_error',
                      'mean_absolute_error',
                      'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
               'roc_auc', 'average_precision', 'precision',
               'precision_weighted', 'precision_macro', 'precision_micro',
               'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
               'neg_log_loss', 'log_loss',
               'adjusted_rand_score'  # not really, but works
               ]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
    # Make estimators that make sense to test various scoring methods
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    return dict(
        [(name, sensible_regr) for name in REGRESSION_SCORERS] +
        [(name, sensible_clf) for name in CLF_SCORERS] +
        [(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
    )
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
    # Create some memory mapped data
    global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
    TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
    X, y = make_classification(n_samples=30, n_features=5, random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
    joblib.dump((X, y, y_ml), filename)
    X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
    ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
    global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
    # GC closes the mmap file descriptors
    X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
    shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
    """Dummy estimator to test check_scoring"""
    pass
class EstimatorWithFit(BaseEstimator):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        return self
class EstimatorWithFitAndScore(object):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        return self
    def score(self, X, y):
        return 1.0
class EstimatorWithFitAndPredict(object):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        self.y = y
        return self
    def predict(self, X):
        return self.y
class DummyScorer(object):
    """Dummy scorer that always returns 1."""
    def __call__(self, est, X, y):
        return 1
def test_all_scorers_repr():
    # Test that all scorers have a working repr
    for name, scorer in SCORERS.items():
        repr(scorer)
def test_check_scoring():
    # Test all branches of check_scoring
    estimator = EstimatorWithoutFit()
    pattern = (r"estimator should be an estimator implementing 'fit' method,"
               r" .* was passed")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    scorer = check_scoring(estimator)
    assert_true(scorer is _passthrough_scorer)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    pattern = (r"If no scoring is specified, the estimator passed should have"
               r" a 'score' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
    scorer = check_scoring(estimator, "accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, "accuracy")
    assert_true(isinstance(scorer, _PredictScorer))
    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, allow_none=True)
    assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
    # test that check_scoring works on GridSearchCV and pipeline.
    # slightly redundant non-regression test.
    grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
    scorer = check_scoring(grid, "f1")
    assert_true(isinstance(scorer, _PredictScorer))
    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, "f1")
    assert_true(isinstance(scorer, _PredictScorer))
    # check that cross_val_score definitely calls the scorer
    # and doesn't make any assumptions about the estimator apart from having a
    # fit.
    scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
                             scoring=DummyScorer())
    assert_array_equal(scores, 1)
def test_make_scorer():
    # Sanity check on the make_scorer factory function.
    f = lambda *args: 0
    assert_raises(ValueError, make_scorer, f, needs_threshold=True,
                  needs_proba=True)
def test_classification_scores():
    # Test classification scorers.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)
    for prefix, metric in [('f1', f1_score), ('precision', precision_score),
                           ('recall', recall_score)]:
        score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='weighted')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='macro')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='micro')
        assert_almost_equal(score1, score2)
        score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)
    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)
    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)
    # smoke test the repr:
    repr(fbeta_score)
def test_regression_scorers():
    # Test regression scorers.
    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = Ridge()
    clf.fit(X_train, y_train)
    score1 = get_scorer('r2')(clf, X_test, y_test)
    score2 = r2_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2)
def test_thresholded_scorers():
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)
    logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)
    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)
    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorer work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifier
    X, y = make_multilabel_classification(allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
    assert_almost_equal(score1, score2)
    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
    assert_almost_equal(score1, score2)
    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)
    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
    # Test clustering scorers against gold standard labeling.
    # We don't have any real unsupervised Scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
    score2 = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
    # Test that when a list of scores is returned, we raise proper errors.
    X, y = make_blobs(random_state=0)
    f1_scorer_no_average = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring=f1_scorer_no_average)
    grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
                               param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors
    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0
    # get sensible estimators for each metric
    estimator = _make_estimators(X_train, y_train, y_ml_train)
    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            assert_not_equal(weighted, unweighted,
                             msg="scorer {0} behaves identically when "
                             "called with sample weights: {1} vs "
                             "{2}".format(name, weighted, unweighted))
            assert_almost_equal(weighted, ignored,
                                err_msg="scorer {0} behaves differently when "
                                "ignoring samples and setting sample_weight to"
                                " 0: {1} vs {2}".format(name, weighted,
                                                        ignored))
        except TypeError as e:
            assert_true("sample_weight" in str(e),
                        "scorer {0} raises unhelpful exception when called "
                        "with sample weights: {1}".format(name, str(e)))
@ignore_warnings  # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
    scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
    if scorer_name in MULTILABEL_ONLY_SCORERS:
        score = scorer(estimator, X_mm, y_ml_mm)
    else:
        score = scorer(estimator, X_mm, y_mm)
    assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
    # Non-regression test for #6147: some score functions would
    # return singleton memmap when computed on memmap data instead of scalar
    # float values.
    for name in SCORERS.keys():
        yield check_scorer_memmap, name
def test_deprecated_names():
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    for name in ('mean_absolute_error', 'mean_squared_error',
                 'median_absolute_error', 'log_loss'):
        warning_msg = "Scoring method %s was renamed to" % name
        for scorer in (get_scorer(name), SCORERS[name]):
            assert_warns_message(DeprecationWarning,
                                 warning_msg,
                                 scorer, clf, X, y)
        assert_warns_message(DeprecationWarning,
                             warning_msg,
                             cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         LogisticRegression(), f1_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         LogisticRegression(), roc_auc_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         Ridge(), r2_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         KMeans(), adjusted_rand_score)
| mit |
| wavelets/zipline | zipline/examples/dual_ema_talib.py | 2 | 3230 |
#!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
# Import exponential moving average from talib wrapper
from zipline.transforms.ta import EMA
from datetime import datetime
import pytz
class DualEMATaLib(TradingAlgorithm):
    """Dual Moving Average Crossover algorithm.
    This algorithm buys apple once its short moving average crosses
    its long moving average (indicating upwards momentum) and sells
    its shares once the averages cross again (indicating downwards
    momentum).
    """
    def initialize(self, short_window=20, long_window=40):
        # Add 2 mavg transforms, one with a long window, one
        # with a short window.
        self.short_ema_trans = EMA(timeperiod=short_window)
        self.long_ema_trans = EMA(timeperiod=long_window)
        # To keep track of whether we invested in the stock or not
        self.invested = False
    def handle_data(self, data):
        self.short_ema = self.short_ema_trans.handle_data(data)
        self.long_ema = self.long_ema_trans.handle_data(data)
        if self.short_ema is None or self.long_ema is None:
            return
        self.buy = False
        self.sell = False
        if (self.short_ema > self.long_ema).all() and not self.invested:
            self.order('AAPL', 100)
            self.invested = True
            self.buy = True
        elif (self.short_ema < self.long_ema).all() and self.invested:
            self.order('AAPL', -100)
            self.invested = False
            self.sell = True
        self.record(AAPL=data['AAPL'].price,
                    short_ema=self.short_ema['AAPL'],
                    long_ema=self.long_ema['AAPL'],
                    buy=self.buy,
                    sell=self.sell)
if __name__ == '__main__':
    start = datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
    end = datetime(1991, 1, 1, 0, 0, 0, 0, pytz.utc)
    data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
                           end=end)
    dma = DualEMATaLib()
    results = dma.run(data).dropna()
    fig = plt.figure()
    ax1 = fig.add_subplot(211, ylabel='portfolio value')
    results.portfolio_value.plot(ax=ax1)
    ax2 = fig.add_subplot(212)
    results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
    ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
             '^', markersize=10, color='m')
    ax2.plot(results.ix[results.sell].index, results.short_ema[results.sell],
             'v', markersize=10, color='k')
    plt.legend(loc=0)
    plt.gcf().set_size_inches(18, 8)
| apache-2.0 |
| ChanChiChoi/scikit-learn | examples/model_selection/plot_roc.py | 146 | 3697 |
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
.. note::
    See also :func:`sklearn.metrics.roc_auc_score`,
             :ref:`example_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# Plot ROC curve
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]))
for i in range(n_classes):
    plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
                                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
| rcolasanti/CompaniesHouseScraper | DVLACompanyNmeMatchCoHoAPIFindMissing.py | 1 | 5174 |
	
import requests
import json
import numpy as np
import pandas as pd
import CoHouseToken
from difflib import SequenceMatcher
# In[3]:
def exactMatch(line1, line2):
    line1=line1.upper().rstrip()    
    line2=line2.upper().rstrip()
    #print("|"+line1+"|"+line2+"|",line1==line2)
    return line1==line2
def aStopWord(word):
    return word.upper().replace("COMPANY","CO").replace("LIMITED","LTD").replace("&","AND").rstrip() 
def spaces(word):
    w = word.upper().replace("/"," ")
    w = w.replace("."," ").replace(","," ").replace("-"," ").rstrip() 
    return w
def removeAStopWord(word):
    w = word.upper().replace("LTD"," ").replace("CO"," ").replace("AND"," ").replace("("," ").replace("/"," ")
    w = w.replace(")"," ").replace("."," ").replace(","," ").replace("-"," ").rstrip() 
    return w
def removeABlank(word):
    w = word.replace(" ","")
    return w
def removeABracket (line):
    flag = False
    word=""
    for a in line:
        if a=="(":
            flag = True
            a=""
        if a==")":
            a=""
            flag = False
        if flag:
            a=""
        word+=a
    return word
    
def stopWord(line1, line2):
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    #print("|"+line1+"|"+line2+"|",line1==line2)
    return line1==line2
def removeStopWord(line1, line2):
    line1=spaces(line1)  
    line2=spaces(line2)
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    line1=removeAStopWord(line1)  
    line2=removeAStopWord(line2)
    #print("|"+line1+"|"+line2+"|",line1==line2)
    return line1==line2
def removeBlanks(line1, line2):
    line1=spaces(line1)  
    line2=spaces(line2)
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    line1=removeAStopWord(line1)  
    line2=removeAStopWord(line2)
    line1=removeABlank(line1)  
    line2=removeABlank(line2)
    return line1==line2
def removeBrackets(line1, line2):
    line1=removeABracket(line1)  
    line2=removeABracket(line2)
    line1=spaces(line1)  
    line2=spaces(line2)
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    line1=removeAStopWord(line1)  
    line2=removeAStopWord(line2)
    line1=removeABlank(line1)  
    line2=removeABlank(line2)
   #print("|"+line1+"|"+line2+"|",line1==line2)
    
    return line1==line2
def strip(line1, line2):
    line1=removeABracket(line1)  
    line2=removeABracket(line2)
    line1=spaces(line1)  
    line2=spaces(line2)
    line1=aStopWord(line1)  
    line2=aStopWord(line2)
    line1=removeAStopWord(line1)  
    line2=removeAStopWord(line2)
    line1=removeABlank(line1)  
    line2=removeABlank(line2)
    
    return line1,line2
def match(company,results):
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(exactMatch(company,line)):
            return True,line,number
            
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(stopWord(company,line)):
            return True,line,number
            
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(removeStopWord(company,line)):
            return True,line,number
            
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(removeBlanks(company,line)):
            return True,line,number
            
    for i in results['items']:
        line = i['title']
        number = i['company_number']
        if(removeBrackets(company,line)):
            return True,line,number
        
        #old_match(company,results)
    return False,"",""
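# Illustrative example (hypothetical company names): strip() reduces both
# spellings to the same normalised string, which is why the cascade in match()
# eventually treats them as the same organisation.
#
#     >>> strip("PYTHON SOFTWARE COMPANY LIMITED", "Python Software Co. Ltd")
#     ('PYTHONSOFTWARE', 'PYTHONSOFTWARE')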
def main(args):
    print(args[0])
    search_url ="https://api.companieshouse.gov.uk/search/companies?q="
    token = CoHouseToken.getToken()
    pw = ''
    base_url = 'https://api.companieshouse.gov.uk'
    file = args[1]
    print(file)
    df = pd.read_csv(file,names=['Organisation'])
    companies = df.Organisation
    count=0
    found = open("found2.csv",'w')
    missing = open("missing2.csv",'w')
    for c in companies:
        c =c.upper().replace("&","AND")
        c = c.split(" T/A ")[0]
        c = c.split("WAS ")[0]
        c= spaces(c)
        url=search_url+c
        results = json.loads(requests.get(url, auth=(token,pw)).text)
        for i , key  in enumerate(results['items']):
            a,b = strip(c, key['title'])
            r = SequenceMatcher(None, a, b).ratio()
            print("%s \t %s\t %.2f \t %s \t %s"%(i,c,r,key['company_number'],key['title']))
        
        v = input('type number or return to reject: ')
        if v =="":
            print("reject")
            missing.write("%s\n"%(c))
        else:
            key = results['items'][int(v)]
            print("%s \t %s\t %.2f \t %s \t %s"%(v,c,r,key['company_number'],key['title']))
            print("*************************")
            found.write("%s,%s,%s,\n"%(c,key['title'],key['company_number']))
        
            
    print()
    #print(count/len(companies))
    return 0
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
| gpl-3.0 |
| mclaughlin6464/pasta | pasta/ising.py | 1 | 5474 |
'''
This is a dummy file for me to get started making an Ising model. I'll get this 2-D Ising running, then generalize.
'''
import argparse
from itertools import izip
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
def run_ising(N, d, K, J,h, n_steps, plot = False):
    '''
    :param N:
    :param d:
    :param K:
    :param J:
    :param h:
    :param n_steps:
    :param plot:
    :return:
    '''
    if plot:
        try:
            assert d <= 2
        except AssertionError:
            raise AssertionError("Can only plot in one or two dimensions.")
    #TODO wrap these better
    assert N >0 and N < 1000
    assert d > 0
    assert n_steps > 0
    np.random.seed(0)
    size = tuple(N for i in xrange(d))
    lattice = np.ones(size)
    #make a random initial state
    lattice-= np.random.randint(0,2, size =size)*2
    # do different initialization
    E_0 = energy(lattice, potential, K, h)
    if plot:
        plt.ion()
    for step in xrange(n_steps):
        if step%1000 == 0:
            print step
        site = tuple(np.random.randint(0, N, size=d))
        # consider flipping this site
        lattice[site] *= -1
        E_f = energy(lattice, potential, K, h)
        # if E_F < E_0, keep
        # if E_F > E_0, keep randomly given change of energies
        if E_f >= E_0:
            keep = np.random.uniform() < np.exp(K / J * (E_0 - E_f))
        else:
            keep = True
        if keep:
            E_0 = E_f
        else:
            lattice[site] *= -1
        # fig = plt.figure()
        if plot and step % 100 == 0:
            if d == 1:
                plt.imshow(lattice.reshape((1, -1)),interpolation='none')
            else:
                plt.imshow(lattice, interpolation='none')
            plt.title(correlation(lattice, N/2))
            plt.pause(0.01)
            plt.clf()
    return np.array([correlation(lattice, r) for r in xrange(1, N/2+1)])
def get_NN(site, N, d, r= 1):
    '''
    The NN of the site. Will only return those UP in index (east, south, and down) to avoid double counting.
    Accounts for PBC
    :param site:
        (d,) array of coordinates in the lattice
    :param N:
        Size of one side of the lattice
    :param d:
        dimension of the lattice
    :return:
        dxd numpy array where each row corresponds to the nearest neighbors.
    '''
    mult_sites = np.r_[ [site for i in xrange(d)]]
    adjustment = np.eye(d)*r
    return ((mult_sites+adjustment)%N).astype(int)
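# Illustrative check of the periodic boundaries (values worked out by hand from
# the definition above): on a 4x4 lattice the east/south neighbours of site
# (0, 0) are (1, 0) and (0, 1), and the neighbours of corner site (3, 3) wrap
# around to index 0.
#
#     >>> get_NN((0, 0), N=4, d=2)
#     array([[1, 0],
#            [0, 1]])
#     >>> get_NN((3, 3), N=4, d=2)
#     array([[0, 3],
#            [3, 0]])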
def potential(s1, s2, K, h):
    '''
    Basic Ising potential
    :param s1:
        First spin (-1 or 1)
    :param s2:
        Second spin
    :param K:
        Coupling constant
    :return:
        Energy of this particular bond
    '''
    return -1*K*s1*s2 - h/2*(s1+s2)#should this be abstracted to call the NN function?
def energy(lattice, potential, K, h = 0):
    '''
    Calculate the energy of a lattice
    :param lattice:
        Lattice to calculate the energy on
    :param potential:
        Function defining the potential of a given site.
    :return:
        Energy of the lattice
    '''
    N = lattice.shape[0]
    d = len(lattice.shape)
    dim_slices = np.meshgrid(*(xrange(N) for i in xrange(d)), indexing = 'ij')
    all_sites = izip(*[slice.flatten() for slice in dim_slices])
    E = 0
    for site in all_sites:
        nn = get_NN(site, N, d)
        for neighbor in nn:
            E+=potential(lattice[site], lattice[tuple(neighbor)],K = K, h = h)
    return E
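# Quick sanity check (derived by hand from the definitions above, not from a
# simulation run): an all-up lattice with h = 0 has every east/south bond at
# energy -K, so the total energy is -K * d * N**d.
#
#     >>> energy(np.ones((4, 4)), potential, K=1.0)  # -1.0 * 2 * 4**2
#     -32.0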
def magnetization(lattice):
    return lattice.mean()
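# (Added note) magnetization is just the mean spin, e.g. magnetization(np.ones((4, 4))) == 1.0.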
def correlation(lattice, r):
    '''
    The average spin correlation at distance r.
    :param lattice:
        The lattice to calculate the statistic on.
    :param r:
        Distance at which to measure the correlation
    :return:
        Average product of spins separated by r along each lattice direction
    '''
    N = lattice.shape[0]
    d = len(lattice.shape)
    dim_slices = np.meshgrid(*(xrange(N) for i in xrange(d)), indexing='ij')
    all_sites = izip(*[slice.flatten() for slice in dim_slices])
    xi = 0
    for site in all_sites:
        nn = get_NN(site, N, d, r)
        for neighbor in nn:
            xi += lattice[site]*lattice[tuple(neighbor)]
    return xi/((N**d)*d)
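# Sanity check (added, not in the original): in a fully ordered lattice every pair
# product is +1, so the correlation at any distance is exactly 1.
assert correlation(np.ones((4, 4)), 1) == 1.0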
if __name__  == '__main__':
    parser = argparse.ArgumentParser(description='Simulate an ising model')
    parser.add_argument('N', type = int, help = 'Length of one side of the cube.')
    parser.add_argument('d', type = int, help = 'Number of dimensions of the cube.')
    #parser.add_argument('K', type = float, help ='Bond coupling strength.')
    parser.add_argument('J', type = float, default = 1.0, nargs = '?',\
                        help = 'Energy of bond strength. Optional, default is 1.')
    parser.add_argument('h', type = float, default=0.0, nargs = '?',\
                        help = 'Magnetic field strength. Optional, default is 0.')
    parser.add_argument('n_steps', type = int, default = 1000, nargs = '?',\
                        help = 'Number of steps to simulate. Optional, default is 1000.')
    parser.add_argument('--plot', action = 'store_true',\
                        help = 'Whether or not to plot results. Only allowed with d = 1 or 2.')
    args = parser.parse_args()
    spins = []
    Ks = [ 0.5,0.6,0.65, 0.7,0.8, 0.9]
    for K in Ks:
        print K
        spins.append(run_ising(K = K, **vars(args)))
    for K, spin in izip(Ks, spins):
        plt.plot(spin, label = K )
    plt.legend(loc = 'best')
    plt.ylim([-0.1, 1.1])
    plt.show() | 
	mit | 
| 
	nicholaschris/landsatpy | 
	stuff.py | 
	1 | 
	1864 | 
	import cloud_detection_new as cloud_detection
from matplotlib import pyplot as plt
import views
from skimage import exposure
nir = cloud_detection.get_nir()[0:600,2000:2600]
red = cloud_detection.get_red()[0:600,2000:2600]
green = cloud_detection.get_green()[0:600,2000:2600]
blue = cloud_detection.get_blue()[0:600,2000:2600] # or use coastal
coastal = cloud_detection.get_coastal()[0:600,2000:2600]
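# Note (added comment): the index below is a normalized-difference ratio of the
# green and blue bands, so values fall in [-1, 1] for non-negative band data.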
marine_shadow_index = (green-blue)/(green+blue)
img = views.create_composite(red, green, blue)
img_rescale = exposure.rescale_intensity(img, in_range=(0, 90))
plt.rcParams['savefig.facecolor'] = "0.8"
vmin, vmax = 0.0, 0.1
def example_plot(ax, data, fontsize=12):
    ax.imshow(data, vmin=vmin, vmax=vmax)
    ax.locator_params(nbins=3)
    ax.set_xlabel('x-label', fontsize=fontsize)
    ax.set_ylabel('y-label', fontsize=fontsize)
    ax.set_title('Title', fontsize=fontsize)
plt.close('all')
fig = plt.figure()
ax1=plt.subplot(243)
ax2=plt.subplot(244)
ax3=plt.subplot(247)
ax4=plt.subplot(248)
ax5=plt.subplot(121)
a_coastal = coastal[500:600, 500:600]
a_blue = blue[500:600, 500:600]
a_green = green[500:600, 500:600]
a_red = red[500:600, 500:600]
a_nir = nir[500:600, 500:600]
a_img = img[500:600, 500:600]
spec1 = [a_coastal[60, 60], a_blue[60, 60], a_green[60, 60], a_red[60, 60], a_nir[60, 60]]
b_coastal = coastal[200:300, 100:200]
b_blue = blue[200:300, 100:200]
b_green = green[200:300, 100:200]
b_red = red[200:300, 100:200]
b_nir = nir[200:300, 100:200]
b_img = img[200:300, 100:200]
example_plot(ax1, coastal)
example_plot(ax2, blue)
example_plot(ax3, green)
example_plot(ax4, red)
ax5.imshow(img)
# plt.tight_layout()
plt.close('all')
spec = [b_coastal[60, 60], b_blue[60, 60], b_green[60, 60], b_red[60, 60], b_nir[60, 60]]
plt.plot(spec, 'k*-')
plt.plot(spec1, 'k.-')
plt.close('all')
cbg = (coastal+blue+green)/3
plt.imshow(cbg/red) | 
	mit | 
| 
	Monika319/EWEF-1 | 
	Cw2Rezonans/Karolina/Oscyloskop/OscyloskopZ5W2.py | 
	1 | 
	1312 | 
	# -*- coding: utf-8 -*-
"""
Plot oscilloscope files from MultiSim
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
from matplotlib import rc
rc('font',family="Consolas")
files=["real_zad5_05f_p2.txt"]
for NazwaPliku in files:
    print NazwaPliku
    Plik=open(NazwaPliku)
    #print DeltaT
    Dane=Plik.readlines()#[4:]
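    # DeltaT (the sample spacing) is read from the 4th whitespace-separated token of the
    # 3rd header line; the export apparently uses ',' as the decimal separator, hence the replace().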
    DeltaT=float(Dane[2].split()[3].replace(",","."))
    #M=len(Dane[4].split())/2
    M=2
    Dane=Dane[5:]
    Plik.close()
    print M
    Ys=[np.zeros(len(Dane)) for i in range(M)]
    for m in range(M):
        for i in range(len(Dane)):
            try:
                Ys[m][i]=float(Dane[i].split()[2+3*m].replace(",","."))
            except:
                print m, i, 2+3*m, len(Dane[i].split()), Dane[i].split()
        #print i, Y[i]
    X=np.zeros_like(Ys[0])
    for i in range(len(X)):
        X[i]=i*DeltaT
    for y in Ys:
        print max(y)-min(y)
    Opis=u"Układ szeregowy\nPołowa częstotliwości rezonansowej"  # "Series circuit / half the resonant frequency"
    Nazwa=u"Z5W2"
    plt.title(u"Przebieg napięciowy\n"+Opis)  # "Voltage waveform"
    plt.xlabel(u"Czas t [s]")  # "Time t [s]"
    plt.ylabel(u"Napięcie [V]")  # "Voltage [V]"
    plt.plot(X,Ys[0],label=u"Wejście")  # input trace
    plt.plot(X,Ys[1],label=u"Wyjście")  # output trace
    plt.grid()
    plt.legend(loc="best")
    plt.savefig(Nazwa + ".png", bbox_inches='tight')
    plt.show()
 | 
	gpl-2.0 | 
| 
	nddsg/TreeDecomps | 
	xplodnTree/tdec/b2CliqueTreeRules.py | 
	1 | 
	3569 | 
	#!/usr/bin/env python
__author__ = 'saguinag' + '@' + 'nd.edu'
__version__ = "0.1.0"
##
## fname "b2CliqueTreeRules.py"
##
## TODO: some todo list
## VersionLog:
import net_metrics as metrics
import pandas as pd
import argparse, traceback
import os, sys
import networkx as nx
import re
from collections import deque, defaultdict, Counter
import tree_decomposition as td
import PHRG as phrg
import probabilistic_cfg as pcfg
import exact_phrg as xphrg
import a1_hrg_cliq_tree as nfld
from a1_hrg_cliq_tree import load_edgelist
DEBUG = False
def get_parser ():
  parser = argparse.ArgumentParser(description='b2CliqueTreeRules.py: given a tree derive grammar rules')
  parser.add_argument('-t', '--treedecomp', required=True, help='input tree decomposition (dimacs file format)')
  parser.add_argument('--version', action='version', version=__version__)
  return parser
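# Hypothetical invocation (file name is illustrative, not taken from the repo):
#   python b2CliqueTreeRules.py -t karate.mcs.dimacs
# dimacs_td_ct() below then derives gname="karate" and tdh="mcs", and loads the
# edge list from "datasets/out.karate".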
def dimacs_td_ct (tdfname):
  """ tree decomp to clique-tree """
  print '... input file:', tdfname
  fname = tdfname
  graph_name = os.path.basename(fname)
  gname = graph_name.split('.')[0]
  gfname = "datasets/out." + gname
  tdh = os.path.basename(fname).split('.')[1] # tree decomp heuristic
  tfname = gname+"."+tdh
  G = load_edgelist(gfname)
  if DEBUG: print nx.info(G)
  print
  with open(fname, 'r') as f:  # read tree decomp from inddgo
    lines = f.readlines()
    lines = [x.rstrip('\r\n') for x in lines]
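  # Expected line formats (inferred from the parsing below): bag lines look like
  # "B <bag_id> <bag_size> <v1> <v2> ...", tree-edge lines look like "e <s> <t>".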
  cbags = {}
  bags = [x.split() for x in lines if x.startswith('B')]
  for b in bags:
    cbags[int(b[1])] = [int(x) for x in b[3:]]  # what to do with bag size?
  edges = [x.split()[1:] for x in lines if x.startswith('e')]
  edges = [[int(k) for k in x] for x in edges]
  tree = defaultdict(set)
  for s, t in edges:
    tree[frozenset(cbags[s])].add(frozenset(cbags[t]))
    if DEBUG: print '.. # of keys in `tree`:', len(tree.keys())
  if DEBUG: print tree.keys()
  root = list(tree)[0]
  if DEBUG: print '.. Root:', root
  root = frozenset(cbags[1])
  if DEBUG: print '.. Root:', root
  T = td.make_rooted(tree, root)
  if DEBUG: print '.. T rooted:', len(T)
  # nfld.unfold_2wide_tuple(T) # lets me display the tree's frozen sets
  T = phrg.binarize(T)
  prod_rules = {}
  td.new_visit(T, G, prod_rules)
  if DEBUG: print "--------------------"
  if DEBUG: print "- Production Rules -"
  if DEBUG: print "--------------------"
  for k in prod_rules.iterkeys():
    if DEBUG: print k
    s = 0
    for d in prod_rules[k]:
      s += prod_rules[k][d]
    for d in prod_rules[k]:
      prod_rules[k][d] = float(prod_rules[k][d]) / float(s)  # normalization step to create probs not counts.
      if DEBUG: print '\t -> ', d, prod_rules[k][d]
  rules = []
  id = 0
  for k, v in prod_rules.iteritems():
    sid = 0
    for x in prod_rules[k]:
      rhs = re.findall("[^()]+", x)
      rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
      if DEBUG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
      sid += 1
    id += 1
  df = pd.DataFrame(rules)
  outdf_fname = "./ProdRules/"+tfname+".prules"
  if not os.path.isfile(outdf_fname+".bz2"):
    print '...',outdf_fname, "written"
    df.to_csv(outdf_fname+".bz2", compression="bz2")
  else:
    print '...', outdf_fname, "file exists"
  return
def main ():
  parser = get_parser()
  args = vars(parser.parse_args())
  dimacs_td_ct(args['treedecomp'])  # gen synth graph
if __name__ == '__main__':
  try:
    main()
  except Exception, e:
    print str(e)
    traceback.print_exc()
    sys.exit(1)
  sys.exit(0)
 | 
	mit | 