def blogroll(request, btype):
    'View that handles the generation of blogrolls.'
    response, site, cachekey = initview(request)
    if response:
        return response[0]
    template = loader.get_template('feedjack/{0}.xml'.format(btype))
    ctx = dict()
    fjlib.get_extra_context(site, ctx)
    ctx = Context(ctx)
    response = HttpResponse(template.render(ctx), content_type='text/xml; charset=utf-8')
    patch_vary_headers(response, ['Host'])
    fjcache.cache_set(site, cachekey, (response, ctx_get(ctx, 'last_modified')))
    return response
def get_scale_text(self):
    """Report current scaling in human-readable format.

    Returns
        text : str
            ``'<num>x'`` if enlarged, or ``'1/<num>x'`` if shrunken.
    """
    scalefactor = self.get_scale_max()
    if scalefactor >= 1.0:
        text = '%.2fx' % (scalefactor)
    else:
        text = '1/%.2fx' % (1.0 / scalefactor)
    return text
def mknts(self, add_dct):
    """Add information from add_dct to a new copy of namedtuples stored in nts."""
    nts = []
    assert len(add_dct) == len(self.nts)
    flds = list(next(iter(self.nts))._fields) + list(next(iter(add_dct)).keys())
    ntobj = cx.namedtuple("ntgoea", " ".join(flds))
    for dct_new, ntgoea in zip(add_dct, self.nts):
        dct_curr = ntgoea._asdict()
        for key, val in dct_new.items():
            dct_curr[key] = val
        nts.append(ntobj(**dct_curr))
    return nts
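The function above rebuilds each namedtuple with extra fields appended. A minimal, self-contained sketch of the same pattern, using hypothetical field names and values:

import collections as cx

Base = cx.namedtuple("Base", "GO p_uncorrected")
base_nts = [Base("GO:0008150", 0.01)]
add_dct = [{"p_fdr_bh": 0.04}]

# extend the field list, define a new namedtuple type, then merge the dicts
flds = list(Base._fields) + list(add_dct[0].keys())
Ext = cx.namedtuple("ntgoea", " ".join(flds))
print(Ext(**dict(base_nts[0]._asdict(), **add_dct[0])))
# ntgoea(GO='GO:0008150', p_uncorrected=0.01, p_fdr_bh=0.04)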
def power_chisq_at_points_from_precomputed(corr, snr, snr_norm, bins, indices):
    """Calculate the chisq timeseries from precomputed values for only select points.

    This function calculates the chisq at each point by explicitly time shifting and
    summing each bin. No FFT is involved.

    Parameters
        corr : FrequencySeries
            The product of the template and data in the frequency domain.
        snr : numpy.ndarray
            The unnormalized array of snr values at only the selected points in `indices`.
        snr_norm : float
            The normalization of the snr (EXPLAINME: refer to Findchirp paper?)
        bins : List of integers
            The edges of the equal power bins
        indices : Array
            The indices where we will calculate the chisq. These must be relative to
            the given `corr` series.

    Returns
        chisq : Array
            An array containing only the chisq at the selected points.
    """
    num_bins = len(bins) - 1
    chisq = shift_sum(corr, indices, bins)  # pylint:disable=assignment-from-no-return
    return (chisq * num_bins - (snr.conj() * snr).real) * (snr_norm ** 2.0)
def _coo_to_sparse_series(A, dense_index=False):
    """Convert a scipy.sparse.coo_matrix to a SparseSeries.

    Use the defaults given in the SparseSeries constructor.
    """
    s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
    s = s.sort_index()
    s = s.to_sparse()  # TODO: specify kind?
    if dense_index:
        # is there a better constructor method to use here?
        i = range(A.shape[0])
        j = range(A.shape[1])
        ind = MultiIndex.from_product([i, j])
        s = s.reindex(ind)
    return s
def convert_data_to_ndarray(self):
    """Converts the data from dataframe to ndarray format.

    Assumption: df-columns are ndarray-layers (3rd dim.)
    """
    if self._data_structure != "DataFrame":
        raise Exception(f"Data is not a DataFrame but {self._data_structure}.")
    self._data = self._convert_to_ndarray(self._data)
    self._update_data_structure()
    return self
def lstm_seq2seq_internal(inputs, targets, hparams, train):
    """The basic LSTM seq2seq model, main step used for training."""
    with tf.variable_scope("lstm_seq2seq"):
        if inputs is not None:
            inputs_length = common_layers.length_from_embedding(inputs)
            # Flatten inputs.
            inputs = common_layers.flatten4d3d(inputs)
            # LSTM encoder.
            inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)
            _, final_encoder_state = lstm(inputs, inputs_length, hparams, train, "encoder")
        else:
            final_encoder_state = None
        # LSTM decoder.
        shifted_targets = common_layers.shift_right(targets)
        # Add 1 to account for the padding added to the left from shift_right.
        targets_length = common_layers.length_from_embedding(shifted_targets) + 1
        decoder_outputs, _ = lstm(
            common_layers.flatten4d3d(shifted_targets),
            targets_length,
            hparams,
            train,
            "decoder",
            initial_state=final_encoder_state)
        return tf.expand_dims(decoder_outputs, axis=2)
def _get_win_argv():
    """Returns a unicode argv under Windows and standard sys.argv otherwise.

    Returns:
        List[`fsnative`]
    """
    assert is_win
    argc = ctypes.c_int()
    try:
        argv = winapi.CommandLineToArgvW(winapi.GetCommandLineW(), ctypes.byref(argc))
    except WindowsError:
        return []
    if not argv:
        return []
    res = argv[max(0, argc.value - len(sys.argv)):argc.value]
    winapi.LocalFree(argv)
    return res
def do_install(ctx, verbose, fake):
    """Installs legit git aliases."""
    click.echo('The following git aliases will be installed:\n')
    aliases = cli.list_commands(ctx)
    output_aliases(aliases)
    if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
        for alias in aliases:
            cmd = '!legit ' + alias
            system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
            verbose_echo(system_command, verbose, fake)
            if not fake:
                os.system(system_command)
        if not fake:
            click.echo("\nAliases installed.")
    else:
        click.echo("\nAliases will not be installed.")
def get_vmpolicy_macaddr_output_vmpolicy_macaddr_name(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
    config = get_vmpolicy_macaddr
    output = ET.SubElement(get_vmpolicy_macaddr, "output")
    vmpolicy_macaddr = ET.SubElement(output, "vmpolicy-macaddr")
    name = ET.SubElement(vmpolicy_macaddr, "name")
    name.text = kwargs.pop('name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _getMethodsVoc(self):
    """Return the registered methods as DisplayList."""
    methods = api.search({
        "portal_type": "Method",
        "is_active": True}, "bika_setup_catalog")
    items = map(lambda m: (api.get_uid(m), api.get_title(m)), methods)
    items.sort(lambda x, y: cmp(x[1], y[1]))
    items.insert(0, ("", _("Not specified")))
    return DisplayList(list(items))
def p_inputunit(p):
    '''inputunit : simple_list simple_list_terminator
                 | NEWLINE
                 | error NEWLINE
                 | EOF'''
    # XXX
    if p.lexer._parserstate & flags.parser.CMDSUBST:
        p.lexer._parserstate.add(flags.parser.EOFTOKEN)
    if isinstance(p[1], ast.node):
        p[0] = p[1]
    # accept right here in case the input contains more lines that are
    # not part of the current command
    p.accept()
def authorize(self, me, state=None, next_url=None, scope='read'):
    """Authorize a user via Micropub.

    Args:
        me (string): the authing user's URL. If it does not begin with
            https?://, http:// will be prepended.
        state (string, optional): passed through the whole auth process,
            useful if you want to maintain some state, e.g. the starting page
            to return to when auth is complete.
        next_url (string, optional): deprecated and replaced by the more
            general "state". Still here for backward compatibility.
        scope (string, optional): a space-separated string of micropub
            scopes. 'read' by default.

    Returns:
        a redirect to the user's specified authorization endpoint, or
        https://indieauth.com/auth if none is provided.
    """
    redirect_url = flask.url_for(
        self.flask_endpoint_for_function(self._authorized_handler), _external=True)
    return self._start_indieauth(me, redirect_url, state or next_url, scope)
def read(cls, source, *args, **kwargs):
    """Read data into a `StateVector`.

    Parameters
        source : `str`, `list`
            Source of data, any of the following:
            - `str` path of single data file,
            - `str` path of LAL-format cache file,
            - `list` of paths.
        channel : `str`, `~gwpy.detector.Channel`
            the name of the channel to read, or a `Channel` object.
        start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`
            GPS start time of required data, any input parseable by
            `~gwpy.time.to_gps` is fine
        end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
            GPS end time of required data, defaults to end of data found;
            any input parseable by `~gwpy.time.to_gps` is fine
        bits : `list`, optional
            list of bits names for this `StateVector`, give `None` at any
            point in the list to mask that bit
        format : `str`, optional
            source format identifier. If not given, the format will be
            detected if possible. See below for list of acceptable formats.
        nproc : `int`, optional, default: `1`
            number of parallel processes to use, serial process by default.
        gap : `str`, optional
            how to handle gaps in the cache, one of
            - 'ignore': do nothing, let the underlying reader method handle it
            - 'warn': do nothing except print a warning to the screen
            - 'raise': raise an exception upon finding a gap (default)
            - 'pad': insert a value to fill the gaps
        pad : `float`, optional
            value with which to fill gaps in the source data, only used if
            gap is not given, or `gap='pad'` is given

    Examples
        To read the S6 state vector, with names for all the bits::

            >>> sv = StateVector.read(
            ...     'H-H1_LDAS_C02_L2-968654592-128.gwf',
            ...     'H1:IFO-SV_STATE_VECTOR',
            ...     bits=['Science mode', 'Conlog OK', 'Locked',
            ...           'No injections', 'No Excitations'],
            ...     dtype='uint32')

        then you can convert these to segments

            >>> segments = sv.to_dqflags()

        or to read just the interferometer operations bits::

            >>> sv = StateVector.read(
            ...     'H-H1_LDAS_C02_L2-968654592-128.gwf',
            ...     'H1:IFO-SV_STATE_VECTOR',
            ...     bits=['Science mode', None, 'Locked'],
            ...     dtype='uint32')

        Running `to_dqflags` on this example would only give 2 flags, rather
        than all five.

        Alternatively the `bits` attribute can be reset after reading, but
        before any further operations.

    Notes
    """
    return super(StateVector, cls).read(source, *args, **kwargs)
def to_pandas(self):
    """Convert to pandas Index.

    Returns
        pandas.base.Index
    """
    if not self.is_raw():
        raise ValueError('Cannot convert to pandas Index if not evaluated.')
    from pandas import Index as PandasIndex
    return PandasIndex(self.values, self.dtype, name=self.name)
def halt(self):
    """Halt current endpoint."""
    try:
        self._halt()
    except IOError as exc:
        if exc.errno != errno.EBADMSG:
            raise
    else:
        raise ValueError('halt did not return EBADMSG ?')
    self._halted = True
def triangle_area(pt1, pt2, pt3):
    r"""Return the area of a triangle.

    Parameters
        pt1 : (X, Y) ndarray
            Starting vertex of a triangle
        pt2 : (X, Y) ndarray
            Second vertex of a triangle
        pt3 : (X, Y) ndarray
            Ending vertex of a triangle

    Returns
        area : float
            Area of the given triangle.
    """
    a = 0.0
    a += pt1[0] * pt2[1] - pt2[0] * pt1[1]
    a += pt2[0] * pt3[1] - pt3[0] * pt2[1]
    a += pt3[0] * pt1[1] - pt1[0] * pt3[1]
    return abs(a) / 2
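The body above is the shoelace formula: the signed cross products of consecutive vertices sum to twice the area. A quick check with hypothetical vertices:

# 3-4-5 right triangle; expected area is 6.0
print(triangle_area((0.0, 0.0), (4.0, 0.0), (0.0, 3.0)))  # 6.0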
def require_ajax_logged_in(func):
    """Check if ajax API is logged in and login if not."""
    @functools.wraps(func)
    def inner_func(self, *pargs, **kwargs):
        if not self._ajax_api.logged_in:
            logger.info('Logging into AJAX API for required meta method')
            if not self.has_credentials:
                raise ApiLoginFailure('Login is required but no credentials were provided')
            self._ajax_api.User_Login(name=self._state['username'],
                                      password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return inner_func
def key_usage(self):
    """The :py:class:`~django_ca.extensions.KeyUsage` extension, or ``None`` if it doesn't exist."""
    try:
        ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.KEY_USAGE)
    except x509.ExtensionNotFound:
        return None
    return KeyUsage(ext)
def get(self, **kwargs):
    """Returns the first object encountered that matches the specified lookup parameters.

        >>> site_list.get(id=1)
        {'url': 'http://site1.tld/', 'published': False, 'id': 1}
        >>> site_list.get(published=True, id__lt=3)
        {'url': 'http://site1.tld/', 'published': True, 'id': 2}
        >>> site_list.filter(published=True).get(id__lt=3)
        {'url': 'http://site1.tld/', 'published': True, 'id': 2}

    If the QueryList contains multiple elements that match the criteria, only
    the first match will be returned. Use ``filter()`` to retrieve the entire
    set.

    If no match is found in the QueryList, the method will raise a
    ``NotFound`` exception.

        >>> site_list.get(id=None)
        Traceback (most recent call last):
            File "<stdin>", line 1, in <module>
            File "querylist/list.py", line 113, in get
                "Element not found with attributes: %s" % kv_str)
        querylist.list.NotFound: Element not found with attributes: id=None
    """
    for x in self:
        if self._check_element(kwargs, x):
            return x
    kv_str = self._stringify_kwargs(kwargs)
    raise QueryList.NotFound("Element not found with attributes: %s" % kv_str)
def stream_create_default_file_stream(fname, isa_read_stream):
    """Wraps openjp2 library function opj_stream_create_default_file_stream.

    Sets the stream to be a file stream. This function is only valid for the
    2.1 version of the openjp2 library.

    Parameters
        fname : str
            Specifies a file.
        isa_read_stream : bool
            True (read) or False (write)

    Returns
        stream : stream_t
            An OpenJPEG file stream.
    """
    ARGTYPES = [ctypes.c_char_p, ctypes.c_int32]
    OPENJP2.opj_stream_create_default_file_stream.argtypes = ARGTYPES
    OPENJP2.opj_stream_create_default_file_stream.restype = STREAM_TYPE_P
    read_stream = 1 if isa_read_stream else 0
    file_argument = ctypes.c_char_p(fname.encode())
    stream = OPENJP2.opj_stream_create_default_file_stream(file_argument, read_stream)
    return stream
def heightmap_rain_erosion(
    hm: np.ndarray,
    nbDrops: int,
    erosionCoef: float,
    sedimentationCoef: float,
    rnd: Optional[tcod.random.Random] = None,
) -> None:
    """Simulate the effect of rain drops on the terrain, resulting in erosion.

    ``nbDrops`` should be at least hm.size.

    Args:
        hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
        nbDrops (int): Number of rain drops to simulate.
        erosionCoef (float): Amount of ground eroded on the drop's path.
        sedimentationCoef (float): Amount of ground deposited when the drop
            stops flowing.
        rnd (Optional[Random]): A tcod.Random instance, or None.
    """
    lib.TCOD_heightmap_rain_erosion(
        _heightmap_cdata(hm),
        nbDrops,
        erosionCoef,
        sedimentationCoef,
        rnd.random_c if rnd else ffi.NULL,
    )
def _insert_uow(self, freerun_entry, flow_request=None):
    """Creates unit_of_work and inserts it into the DB.

    :raise DuplicateKeyError: if unit_of_work with given parameters already exists
    """
    process_entry = context.process_context[freerun_entry.process_name]
    arguments = process_entry.arguments
    arguments.update(freerun_entry.arguments)

    if flow_request:
        schedulable_name = flow_request.schedulable_name
        timeperiod = flow_request.timeperiod
        start_timeperiod = flow_request.start_timeperiod
        end_timeperiod = flow_request.end_timeperiod
        arguments.update(flow_request.arguments)
    else:
        schedulable_name = freerun_entry.schedulable_name
        timeperiod = time_helper.actual_timeperiod(QUALIFIER_REAL_TIME)
        start_timeperiod = timeperiod
        end_timeperiod = timeperiod

    uow = UnitOfWork()
    uow.process_name = schedulable_name
    uow.timeperiod = timeperiod
    uow.start_id = 0
    uow.end_id = 0
    uow.start_timeperiod = start_timeperiod
    uow.end_timeperiod = end_timeperiod
    uow.created_at = datetime.utcnow()
    uow.submitted_at = datetime.utcnow()
    uow.source = process_entry.source if hasattr(process_entry, 'source') else None
    uow.sink = process_entry.sink if hasattr(process_entry, 'sink') else None
    uow.state = unit_of_work.STATE_REQUESTED
    uow.unit_of_work_type = unit_of_work.TYPE_FREERUN
    uow.number_of_retries = 0
    uow.arguments = arguments
    uow.db_id = self.uow_dao.insert(uow)

    msg = 'Created: UOW {0} for {1}@{2}.'.format(
        uow.db_id, freerun_entry.schedulable_name, timeperiod)
    self._log_message(INFO, freerun_entry, msg)
    return uow
def xymatch(outfile, filenames, tol=2):
    """Given a list of MOPfiles, merge them based on x/y coordinate matching."""
    import math
    import sys

    output = {}
    files = []
    for filename in filenames:
        this_file = read(filename)
        ## match files based on the 'X' and 'Y' column.
        ## if those don't exist then skip this file
        if not this_file['data'].has_key('X') or not this_file['data'].has_key('Y'):
            continue
        if not output.has_key('data'):
            output = this_file
            continue
        delete_list = []
        for keyword in this_file['header']:
            if not keyword in output['header']:
                output['header'][keyword] = this_file['header'][keyword]
        for col in this_file['data'].keys():
            if not output['data'].has_key(col):
                output['order'].append(col)
                output['data'][col] = []
                if this_file['format'].has_key(col):
                    output['format'][col] = this_file['format'][col]
                else:
                    output['format'][col] = '%10s'
                ### pad previous values with empties..
                for i in range(len(output['data']['X'])):
                    output['data'][col].append(None)
        for i in xrange(len(output['data']['X'])):
            x1 = float(output['data']['X'][i])
            y1 = float(output['data']['Y'][i])
            matched = False
            for j in xrange(len(this_file['data']['X'])):
                x2 = float(this_file['data']['X'][j])
                y2 = float(this_file['data']['Y'][j])
                if (((x1 - x2) ** 2 + (y1 - y2) ** 2) < tol ** 2):
                    for col in this_file['data'].keys():
                        if output['data'][col][i] is None:
                            output['data'][col][i] = this_file['data'][col][j]
                    matched = True
                    break
            if not matched:
                delete_list.append(i)
        delete_list.sort()
        delete_list.reverse()
        for i in delete_list:
            for col in output['data'].keys():
                del output['data'][col][i]
    write(outfile, output)
def crop_by_percent(cmap, per, which='both', N=None):
    '''Crop end or ends of a colormap by per percent.

    :param cmap: A colormap object, like cmocean.cm.matter.
    :param per: Percent of colormap to remove. If which=='both', take this
        percent off both ends of the colormap. If which=='min' or
        which=='max', take the percent only off the specified end of the
        colormap.
    :param which='both': which end or ends of the colormap to cut off.
        which='both' removes from both ends, which='min' from the bottom end,
        and which='max' from the top end.
    :param N=None: User can specify the number of rows for the outgoing
        colormap. If unspecified, N from the incoming colormap will be used
        and values will be interpolated as needed to fill in rows.

    Outputs resultant colormap object.

    This is a wrapper around crop() to make it easier to use for cropping
    based on percent.

    Examples:

        # example with oxy map: cut off yellow part which is top 20%
        # compare with full colormap
        vmin = 0; vmax = 10; pivot = 5
        A = np.random.randint(vmin, vmax, (5, 5))
        fig, axes = plt.subplots(1, 2)
        mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
        fig.colorbar(mappable, ax=axes[0])
        vmin = 0; vmax = 8; pivot = 5
        newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='max', N=None)
        plt.figure()
        plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
        plt.colorbar()

        # example with oxy map: cut off red part which is bottom 20%
        # compare with full colormap
        vmin = 0; vmax = 10; pivot = 5
        A = np.random.randint(vmin, vmax, (5, 5))
        fig, axes = plt.subplots(1, 2)
        mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
        fig.colorbar(mappable, ax=axes[0])
        vmin = 2; vmax = 10; pivot = 5
        A = np.random.randint(vmin, vmax, (5, 5))
        newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='min', N=None)
        plt.figure()
        plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
        plt.colorbar()

        # crop both dark ends off colormap to reduce range
        newcmap = crop_by_percent(cmocean.cm.balance, 10, which='both', N=None)
        plt.figure()
        A = np.random.randint(-5, 5, (5, 5))
        plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
        plt.colorbar()
    '''
    if which == 'both':  # take percent off both ends of cmap
        vmin = -100; vmax = 100; pivot = 0
        dmax = per
    elif which == 'min':  # take percent off bottom of cmap
        vmax = 10; pivot = 5
        vmin = (0 + per / 100) * 2 * pivot
        dmax = None
    elif which == 'max':  # take percent off top of cmap
        vmin = 0; pivot = 5
        vmax = (1 - per / 100) * 2 * pivot
        dmax = None
    newcmap = crop(cmap, vmin, vmax, pivot, dmax=dmax, N=N)
    return newcmap
def sendRequest(self, socket, cmd, args=(), timeout=10):
    '''Perform client request/reply.

    Request is a ZMQ multipart message:
        - command string
        - pickled argument list
    Reply is a pickled object.
    '''
    self._logger.debug("sending request %s %s" % (cmd, args))
    t0 = time.time()
    self._sendMultiPartWithBarrierTimeout(
        socket, [cmd.encode(), pickle.dumps(args, Constants.PICKLE_PROTOCOL)], timeout)
    toBeReturned = self.receiveWithTimeout(socket, timeout)
    retObj = pickle.loads(toBeReturned, **pickle_options)
    self._logger.debug("%s received %s in %.3fs" % (cmd, str(retObj), time.time() - t0))
    if isinstance(retObj, Exception):
        raise retObj
    else:
        return retObj
def relative_humidity_from_dewpoint(temperature, dewpt):
    r"""Calculate the relative humidity.

    Uses temperature and dewpoint in celsius to calculate relative humidity
    using the ratio of vapor pressure to saturation vapor pressures.

    Parameters
        temperature : `pint.Quantity`
            The temperature
        dewpt : `pint.Quantity`
            The dew point temperature

    Returns
        `pint.Quantity`
            The relative humidity

    See Also
        saturation_vapor_pressure
    """
    e = saturation_vapor_pressure(dewpt)
    e_s = saturation_vapor_pressure(temperature)
    return (e / e_s)
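For a feel of the numbers, here is a stand-alone sketch that mirrors the e / e_s ratio above using the Bolton (1980) approximation for saturation vapor pressure (an assumption on my part; the function above relies on whatever saturation_vapor_pressure implements):

import math

def _svp_hpa(t_c):
    # Bolton (1980) approximation of saturation vapor pressure, in hPa
    return 6.112 * math.exp(17.67 * t_c / (t_c + 243.5))

# relative humidity as the vapor-pressure ratio, for T=25 degC and Td=15 degC
print(round(_svp_hpa(15.0) / _svp_hpa(25.0), 2))  # ~0.54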
def difference(self, *args):
    """Take the difference between one array and a number of other arrays.

    Only the elements present in just the first array will remain.
    """
    setobj = set(self.obj)
    for i, v in enumerate(args):
        setobj = setobj - set(args[i])
    return self._wrap(self._clean._toOriginal(setobj))
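A plain-set sketch of the same semantics with hypothetical inputs:

first = [1, 2, 3, 4, 5]
others = ([2, 3], [5])
result = set(first)
for other in others:
    result -= set(other)
print(sorted(result))  # [1, 4]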
def _ensure_config_file_exists():
    """Makes sure the config file exists.

    :raises: :class:`epab.core.new_config.exc.ConfigFileNotFoundError`
    """
    config_file = Path(ELIBConfig.config_file_path).absolute()
    if not config_file.exists():
        raise ConfigFileNotFoundError(ELIBConfig.config_file_path)
def sipdir_is_finished(sipdir):
    """Return the state of modeling and inversion for a given SIP dir.

    The result does not take into account sensitivities or potentials, as
    optionally generated by CRMod.

    Parameters
        sipdir : string
            Directory to check

    Returns
        crmod_is_finished : bool
            True if all tomodirs of this SIP directory contain finished
            modeling results.
        crtomo_is_finished : bool
            True if all tomodirs of this SIP directory contain finished
            inversion results.
    """
    if not is_sipdir(sipdir):
        raise Exception('Directory is not a valid SIP directory!')
    subdirs_raw = sorted(glob.glob(sipdir + os.sep + 'invmod' + os.sep + '*'))
    subdirs = [x for x in subdirs_raw if os.path.isdir(x)]
    crmod_finished = True
    crtomo_finished = True
    for subdir in subdirs:
        subcrmod, subcrtomo = td_is_finished(subdir)
        if not subcrmod:
            crmod_finished = False
        if not subcrtomo:
            crtomo_finished = False
    return crmod_finished, crtomo_finished
def interval_timer(interval, func, *args, **kwargs):
    '''Interval timer function.

    Taken from:
    http://stackoverflow.com/questions/22498038/improvement-on-interval-python/22498708
    '''
    stopped = Event()

    def loop():
        while not stopped.wait(interval):  # the first call is after interval
            func(*args, **kwargs)

    Thread(name='IntervalTimerThread', target=loop).start()
    return stopped.set
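A hypothetical usage sketch (assuming Event and Thread come from threading, as in the surrounding module): the returned callable stops the loop.

import time

cancel = interval_timer(2, print, "tick")  # prints "tick" roughly every 2 seconds
time.sleep(7)
cancel()  # sets the internal Event, so the loop's wait() returns True and it exits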
def _get_entropy(reference_beats, estimated_beats, bins):
    """Helper function for information gain
    (needs to be run twice - once backwards, once forwards).

    Parameters
        reference_beats : np.ndarray
            reference beat times, in seconds
        estimated_beats : np.ndarray
            query beat times, in seconds
        bins : int
            Number of bins in the beat error histogram

    Returns
        entropy : float
            Entropy of beat error histogram
    """
    beat_error = np.zeros(estimated_beats.shape[0])
    for n in range(estimated_beats.shape[0]):
        # Get index of closest annotation to this beat
        beat_distances = estimated_beats[n] - reference_beats
        closest_beat = np.argmin(np.abs(beat_distances))
        absolute_error = beat_distances[closest_beat]
        # If the first annotation is closest...
        if closest_beat == 0:
            # Inter-annotation interval - space between first two beats
            interval = .5 * (reference_beats[1] - reference_beats[0])
        # If last annotation is closest...
        if closest_beat == (reference_beats.shape[0] - 1):
            interval = .5 * (reference_beats[-1] - reference_beats[-2])
        else:
            if absolute_error < 0:
                # Closest annotation is the one before the current beat
                # so look at previous inner-annotation-interval
                start = reference_beats[closest_beat]
                end = reference_beats[closest_beat - 1]
                interval = .5 * (start - end)
            else:
                # Closest annotation is the one after the current beat
                # so look at next inner-annotation-interval
                start = reference_beats[closest_beat + 1]
                end = reference_beats[closest_beat]
                interval = .5 * (start - end)
        # The actual error of this beat
        beat_error[n] = .5 * absolute_error / interval
    # Put beat errors in range (-.5, .5)
    beat_error = np.mod(beat_error + .5, -1) + .5
    # Note these are slightly different than the beat evaluation toolbox
    # (they are uniform)
    histogram_bin_edges = np.linspace(-.5, .5, bins + 1)
    # Get the histogram
    raw_bin_values = np.histogram(beat_error, histogram_bin_edges)[0]
    # Turn into a proper probability distribution
    raw_bin_values = raw_bin_values / (1.0 * np.sum(raw_bin_values))
    # Set zero-valued bins to 1 to make the entropy calculation well-behaved
    raw_bin_values[raw_bin_values == 0] = 1
    # Calculate entropy
    return -np.sum(raw_bin_values * np.log2(raw_bin_values))
def _parameterize_string(raw):
    """Substitute placeholders in a string using CloudFormation references.

    Args:
        raw (`str`): String to be processed. Byte strings are not supported;
            decode them before passing them to this function.

    Returns:
        `str` | :class:`troposphere.GenericHelperFn`: An expression with
            placeholders from the input replaced, suitable to be passed to
            Troposphere to be included in CloudFormation template. This will
            be the input string without modification if no substitutions are
            found, and a composition of CloudFormation calls otherwise.
    """
    parts = []
    s_index = 0
    for match in _PARAMETER_PATTERN.finditer(raw):
        parts.append(raw[s_index:match.start()])
        parts.append({u"Ref": match.group(1)})
        s_index = match.end()
    if not parts:
        return GenericHelperFn(raw)
    parts.append(raw[s_index:])
    return GenericHelperFn({u"Fn::Join": [u"", parts]})
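A self-contained sketch of the substitution logic, using a hypothetical stand-in for _PARAMETER_PATTERN (the real pattern is defined elsewhere in the module):

import re

pattern = re.compile(r"\$\{(\w+)\}")  # assumed ${name} placeholder syntax
raw = "arn:aws:s3:::${BucketName}/logs"

parts, s_index = [], 0
for match in pattern.finditer(raw):
    parts.append(raw[s_index:match.start()])
    parts.append({"Ref": match.group(1)})
    s_index = match.end()
parts.append(raw[s_index:])
print({"Fn::Join": ["", parts]})
# {'Fn::Join': ['', ['arn:aws:s3:::', {'Ref': 'BucketName'}, '/logs']]}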
def load_from_remote(remote_name, owner=None):
    """Loads the data from a remote repository.

    :param remote_name: The name of the dataset in the remote repository
    :param owner: (optional) The owner of the dataset. If nothing is provided,
        the current user is used. For public datasets use 'public'.
    :return: A new GMQLDataset or a GDataframe
    """
    from .. import GMQLDataset
    pmg = get_python_manager()
    remote_manager = get_remote_manager()
    parser = remote_manager.get_dataset_schema(remote_name, owner)
    source_table = get_source_table()
    id = source_table.search_source(remote=remote_name)
    if id is None:
        id = source_table.add_source(remote=remote_name, parser=parser)
    index = pmg.read_dataset(str(id), parser.get_gmql_parser())
    remote_sources = [id]
    return GMQLDataset.GMQLDataset(index=index, location="remote",
                                   path_or_name=remote_name,
                                   remote_sources=remote_sources)
def register_patches(self):
    """Registers the patches.

    :return: Method success.
    :rtype: bool
    """
    if not self.__paths:
        return False

    unregistered_patches = []
    for path in self.paths:
        for file in foundations.walkers.files_walker(path, ("\.{0}$".format(self.__extension),), ("\._",)):
            name = foundations.strings.get_splitext_basename(file)
            if not self.register_patch(name, file):
                unregistered_patches.append(name)

    if not unregistered_patches:
        return True
    else:
        raise umbra.exceptions.PatchRegistrationError(
            "{0} | '{1}' patches failed to register!".format(
                self.__class__.__name__, ", ".join(unregistered_patches)))
def add_file_to_repo(filename):
    """Add a file to the git repo.

    This method does the same as::

        $ git add filename

    Keyword Arguments:
        :filename: (str) -- name of the file to commit

    Returns:
        <nothing>
    """
    try:
        repo = Repo()
        index = repo.index
        index.add([_delta_dir() + filename])
    except Exception as e:
        print("exception while gitadding file: %s" % e.message)
async def pair(self):
    """Pair pyatv as a remote control with an Apple TV."""
    # Connect using the specified protocol
    # TODO: config should be stored elsewhere so that API is same for both
    protocol = self.atv.service.protocol
    if protocol == const.PROTOCOL_DMAP:
        await self.atv.pairing.start(
            zeroconf=Zeroconf(),
            name=self.args.remote_name,
            pairing_guid=self.args.pairing_guid)
    elif protocol == const.PROTOCOL_MRP:
        await self.atv.pairing.start()

    # Ask for PIN if present or just wait for pairing to end
    if self.atv.pairing.device_provides_pin:
        pin = await _read_input(self.loop, 'Enter PIN on screen: ')
        self.atv.pairing.pin(pin)
    else:
        self.atv.pairing.pin(self.args.pin_code)
        print('Use {0} to pair with "{1}" (press ENTER to stop)'.format(
            self.args.pin_code, self.args.remote_name))
        if self.args.pin_code is None:
            print('Use any pin to pair with "{}" (press ENTER to stop)'.format(
                self.args.remote_name))
        else:
            print('Use pin {} to pair with "{}" (press ENTER to stop)'.format(
                self.args.pin_code, self.args.remote_name))

    await self.loop.run_in_executor(None, sys.stdin.readline)
    await self.atv.pairing.stop()

    # Give some feedback to the user
    if self.atv.pairing.has_paired:
        print('Pairing seems to have succeeded, yey!')
        print('You may now use these credentials: {0}'.format(
            self.atv.pairing.credentials))
    else:
        print('Pairing failed!')
        return 1

    return 0
def cancel_lb(self, loadbal_id):
    """Cancels the specified load balancer.

    :param int loadbal_id: Load Balancer ID to be cancelled.
    """
    lb_billing = self.lb_svc.getBillingItem(id=loadbal_id)
    billing_id = lb_billing['id']
    billing_item = self.client['Billing_Item']
    return billing_item.cancelService(id=billing_id)
def _get_gpu():
    """*DEPRECATED*. Allocates first available GPU using cudaSetDevice(), or returns 0 otherwise."""
    # Note: this code executes, but Tensorflow subsequently complains that the
    # "current context was not created by the StreamExecutor cuda_driver API"
    system = platform.system()
    if system == "Linux":
        libcudart = ct.cdll.LoadLibrary("libcudart.so")
    elif system == "Darwin":
        libcudart = ct.cdll.LoadLibrary("libcudart.dylib")
    elif system == "Windows":
        libcudart = ct.windll.LoadLibrary("libcudart.dll")
    else:
        raise NotImplementedError("Cannot identify system.")

    device_count = ct.c_int()
    libcudart.cudaGetDeviceCount(ct.byref(device_count))
    gpu = 0
    for i in range(device_count.value):
        if (0 == libcudart.cudaSetDevice(i) and 0 == libcudart.cudaFree(0)):
            gpu = i
            break
    return gpu
def input_dialog(self, title="Enter a value", message="Enter a value", default="", **kwargs):
    """Show an input dialog.

    Usage: C{dialog.input_dialog(title="Enter a value", message="Enter a value", default="", **kwargs)}

    @param title: window title for the dialog
    @param message: message displayed above the input box
    @param default: default value for the input box
    @return: a tuple containing the exit code and user input
    @rtype: C{DialogData(int, str)}
    """
    return self._run_zenity(title, ["--entry", "--text", message, "--entry-text", default], kwargs)
def get_commit_message(self, commit_sha):
    """Return the commit message for the given commit hash, replacing #<PRID> with GH-<PRID>."""
    cmd = ["git", "show", "-s", "--format=%B", commit_sha]
    output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    message = output.strip().decode("utf-8")
    if self.config["fix_commit_msg"]:
        return message.replace("#", "GH-")
    else:
        return message
def iter_qs(qs, adapter):
    '''Safely iterate over a DB QuerySet yielding ES documents.'''
    for obj in qs.no_cache().no_dereference().timeout(False):
        if adapter.is_indexable(obj):
            try:
                doc = adapter.from_model(obj).to_dict(include_meta=True)
                yield doc
            except Exception as e:
                model = adapter.model.__name__
                log.error('Unable to index %s "%s": %s',
                          model, str(obj.id), str(e), exc_info=True)
def chmod(path, mode=None, user=None, group=None, other=None, recursive=False):
    """Changes file mode permissions.

        >>> if chmod('/tmp/one', 0755):
        ...     print('OK')
        OK

    NOTE: The preceding ``0`` is required when using a numerical mode.
    """
    successful = True
    mode = _ops_mode(mode)
    if user is not None:
        mode.user = user
    if group is not None:
        mode.group = group
    if other is not None:
        mode.other = other
    if recursive:
        for p in find(path, no_peek=True):
            successful = _chmod(p, mode) and successful
    else:
        successful = _chmod(path, mode)
    return successful
def logs(self):
    """Returns an object to work with the site logs."""
    if self._resources is None:
        self.__init()
    if "logs" in self._resources:
        url = self._url + "/logs"
        return _logs.Log(url=url,
                         securityHandler=self._securityHandler,
                         proxy_url=self._proxy_url,
                         proxy_port=self._proxy_port,
                         initialize=True)
    else:
        return None
def writeTable(self, tableName):
    """Write the table corresponding to the specified name, equivalent to the AMPL statement

    .. code-block:: ampl

        write table tableName;

    Args:
        tableName: Name of the table to be written.
    """
    lock_and_call(lambda: self._impl.writeTable(tableName), self._lock)
def delete(self, item):
    """Delete single item from SingleBlockManager.

    Ensures that self.blocks doesn't become empty.
    """
    loc = self.items.get_loc(item)
    self._block.delete(loc)
    self.axes[0] = self.axes[0].delete(loc)
def pack(self):
    """The `CodeAttribute` in packed byte string form."""
    with io.BytesIO() as file_out:
        file_out.write(pack('>HHI', self.max_stack, self.max_locals, len(self._code)))
        file_out.write(self._code)
        file_out.write(pack('>H', len(self.exception_table)))
        for exception in self.exception_table:
            file_out.write(pack('>HHHH', *exception))
        self.attributes.pack(file_out)
        return file_out.getvalue()
def _updateCanvasDraw(self):
    """Overload of the draw function that updates axes position before each draw."""
    fn = self.canvas.draw

    def draw2(*a, **k):
        self._updateGridSpec()
        return fn(*a, **k)

    self.canvas.draw = draw2
def _create_sagemaker_pipeline_model(self, instance_type):
    """Create a SageMaker Model Entity.

    Args:
        instance_type (str): The EC2 instance type that this Model will be
            used for, this is only used to determine if the image needs GPU
            support or not.
        accelerator_type (str): Type of Elastic Inference accelerator to
            attach to an endpoint for model loading and inference, for
            example, 'ml.eia1.medium'. If not specified, no Elastic Inference
            accelerator will be attached to the endpoint.
    """
    if not self.sagemaker_session:
        self.sagemaker_session = Session()

    containers = self.pipeline_container_def(instance_type)

    self.name = self.name or name_from_image(containers[0]['Image'])
    self.sagemaker_session.create_model(self.name, self.role, containers,
                                        vpc_config=self.vpc_config)
def dict_get_path(data, path, default=None):
    """Returns the value inside a nested structure of data located at the period-delimited path.

    When traversing a list, as long as that list is containing objects of
    type dict, items in that list will have their "name" and "type" values
    tested against the current key in the path.

    Args:
        data (dict or list): data to traverse
        path (str): '.' delimited string

    Kwargs:
        default: value to return if path does not exist
    """
    keys = path.split(".")
    for k in keys:
        if type(data) == list:
            found = False
            for item in data:
                name = item.get("name", item.get("type"))
                if name == k:
                    found = True
                    data = item
                    break
            if not found:
                return default
        elif type(data) == dict:
            if k in data:
                data = data[k]
            else:
                return default
        else:
            return default
    return data
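A hypothetical nested structure mixing dicts and a list of named dicts shows both the name-matching and the default fallback:

data = {"spec": {"containers": [{"name": "web", "image": "nginx"}]}}
print(dict_get_path(data, "spec.containers.web.image"))   # nginx
print(dict_get_path(data, "spec.volumes", default=[]))    # []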
def get_total_alignment_score(bam):
    '''Returns total of AS: tags in the input BAM.'''
    sam_reader = pysam.Samfile(bam, "rb")
    total = 0
    for sam in sam_reader.fetch(until_eof=True):
        try:
            total += sam.opt('AS')
        except:
            pass
    return total
def help_text(cls):
    """Return a slack-formatted list of commands with their usage."""
    docs = [cmd_func.__doc__ for cmd_func in cls.commands.values()]
    # Don't want to include 'usage:' or explanation.
    usage_lines = [doc.partition('\n')[0] for doc in docs]
    terse_lines = [line[len('Usage: '):] for line in usage_lines]
    terse_lines.sort()
    return '\n'.join(['Available commands:\n'] + terse_lines)
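A stand-alone sketch of the docstring trimming, with hypothetical command docstrings:

docs = ["Usage: deploy <env>\nDeploy the service.", "Usage: status\nShow status."]
usage_lines = [doc.partition('\n')[0] for doc in docs]
terse_lines = sorted(line[len('Usage: '):] for line in usage_lines)
print('\n'.join(['Available commands:\n'] + terse_lines))
# Available commands:
#
# deploy <env>
# status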
def calculate_auc_diff(auc_structure_1, auc_structure_2, sort_order):
    """Return the absolute value of the difference in ROC AUC values and corresponding statistics.

    Specifically, |AUC1 - AUC2|, the 95% confidence interval, and the 2-sided p-value.

    :param sort_order:
    :param auc_structure_1: list [(id, best_score, best_query, status, fpf, tpf), ...,]
    :param auc_structure_2: list [(id, best_score, best_query, status, fpf, tpf), ...,]
    :return (AUC1 - AUC2, AUC1 - AUC2 - CI, AUC1 - AUC2 + CI, p-value): tuple
    """
    # determine auc and variance values for both sets
    auc1 = calculate_auc(auc_structure_1, sort_order, 'diff')
    var1a, var1d = calculate_auc_var(auc_structure_1)
    auc2 = calculate_auc(auc_structure_2, sort_order, 'diff')
    var2a, var2d = calculate_auc_var(auc_structure_2)
    # determine covariances between sets
    covara, covard = calculate_auc_covar(auc_structure_1, auc_structure_2)
    # determine standard error
    vardiffa = var2a + var1a - 2 * covara
    vardiffd = var2d + var1a - 2 * covard
    p = len([x for x in auc_structure_1 if x[3] == '1'])
    n = len([x for x in auc_structure_1 if x[3] == '0'])
    se = (vardiffa / p + vardiffd / n) ** 0.5
    # confidence interval
    ci = se * stats.t.ppf(1 - 0.025, n + p - 1)  # 95% confidence interval
    # AUC bounds
    diff = auc1 - auc2
    lower = diff - ci
    upper = diff + ci
    # 2-sided p-value: prob(diff >= abs(diff)) = prob(t >= abs(tt))
    # corresponds to the null hypothesis: the two methods perform identically
    tt = ((diff ** 2) ** 0.5) / se
    p_value = 2 * stats.t.sf(tt, n + p - 1)
    return (auc1, auc2, diff, (lower, upper), p_value)
def not_(self):
    '''Negates this instance's query expression using MongoDB's ``$not`` operator.

    **Example**: ``(User.name == 'Jeff').not_()``

    .. note:: Another usage is via an operator, but parens are needed to get
       past precedence issues: ``~ (User.name == 'Jeff')``
    '''
    ret_obj = {}
    for k, v in self.obj.items():
        if not isinstance(v, dict):
            ret_obj[k] = {'$ne': v}
            continue
        num_ops = len([x for x in v if x[0] == '$'])
        if num_ops != len(v) and num_ops != 0:
            raise BadQueryException('$ operator used in field name')
        if num_ops == 0:
            ret_obj[k] = {'$ne': v}
            continue
        for op, value in v.items():
            k_dict = ret_obj.setdefault(k, {})
            not_dict = k_dict.setdefault('$not', {})
            not_dict[op] = value
    return QueryExpression(ret_obj)
def fit(self, X, y=None, **kwargs):
    """Fits n KMeans models where n is the length of ``self.k_values_``,
    storing the silhouette scores in the ``self.k_scores_`` attribute.

    The "elbow" and silhouette score corresponding to it are stored in
    ``self.elbow_value`` and ``self.elbow_score`` respectively.
    This method finishes up by calling draw to create the plot.
    """
    self.k_scores_ = []
    self.k_timers_ = []

    if self.locate_elbow:
        self.elbow_value_ = None
        self.elbow_score_ = None

    for k in self.k_values_:
        # Compute the start time for each model
        start = time.time()

        # Set the k value and fit the model
        self.estimator.set_params(n_clusters=k)
        self.estimator.fit(X)

        # Append the time and score to our plottable metrics
        self.k_timers_.append(time.time() - start)
        self.k_scores_.append(self.scoring_metric(X, self.estimator.labels_))

    if self.locate_elbow:
        locator_kwargs = {
            'distortion': {'curve_nature': 'convex', 'curve_direction': 'decreasing'},
            'silhouette': {'curve_nature': 'concave', 'curve_direction': 'increasing'},
            'calinski_harabaz': {'curve_nature': 'concave', 'curve_direction': 'increasing'},
        }.get(self.metric, {})
        elbow_locator = KneeLocator(self.k_values_, self.k_scores_, **locator_kwargs)
        self.elbow_value_ = elbow_locator.knee
        if self.elbow_value_ is None:
            warning_message = ("No 'knee' or 'elbow' point detected, "
                               "pass `locate_elbow=False` to remove the warning")
            warnings.warn(warning_message, YellowbrickWarning)
        else:
            self.elbow_score_ = self.k_scores_[self.k_values_.index(self.elbow_value_)]

    self.draw()
    return self
def axis_names(self) -> Tuple[str, ...]:
    """Names of axes (stored in meta-data)."""
    default = ["axis{0}".format(i) for i in range(self.ndim)]
    return tuple(self._meta_data.get("axis_names", None) or default)
def variables(names, **kwargs):
    """Convenience function for the creation of multiple variables. For more
    control, consider using ``symbols(names, cls=Variable, **kwargs)`` directly.

    :param names: string of variable names.
        Example: x, y = variables('x, y')
    :param kwargs: kwargs to be passed onto :func:`sympy.core.symbol.symbols`
    :return: iterable of :class:`symfit.core.argument.Variable` objects
    """
    return symbols(names, cls=Variable, seq=True, **kwargs)
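Minimal usage sketch mirroring the docstring example (assumes the symfit package that provides this helper is installed):

x, y = variables('x, y')
print(x.name, y.name)  # x y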
def copy(self):
    """Creates a shallow copy of the collection.

    # Returns
    `Collection`
        > A copy of the `Collection`
    """
    collectedCopy = copy.copy(self)
    collectedCopy._collection = copy.copy(collectedCopy._collection)
    self._collectedTypes = copy.copy(self._collectedTypes)
    self._allowedTypes = copy.copy(self._allowedTypes)
    collectedCopy.errors = copy.copy(collectedCopy.errors)
    return collectedCopy
def clear_max_string_length(self):
    """stub"""
    if (self.get_max_string_length_metadata().is_read_only() or
            self.get_max_string_length_metadata().is_required()):
        raise NoAccess()
    self.my_osid_object_form._my_map['maxStringLength'] = \
        self.get_max_string_length_metadata().get_default_cardinal_values()[0]
def get_valid_user_by_email(email):
    """Return user instance."""
    user = get_user(email)
    if user:
        if user.valid is False:
            return Err("user not valid")
        return Ok(user)
    return Err("user not exists")
def get_deployment_targets(self, project, deployment_group_id, tags=None, name=None,
                           partial_name_match=None, expand=None, agent_status=None,
                           agent_job_result=None, continuation_token=None, top=None,
                           enabled=None, property_filters=None):
    """GetDeploymentTargets.

    [Preview API] Get a list of deployment targets in a deployment group.
    :param str project: Project ID or project name
    :param int deployment_group_id: ID of the deployment group.
    :param [str] tags: Get only the deployment targets that contain all of this comma-separated list of tags.
    :param str name: Name pattern of the deployment targets to return.
    :param bool partial_name_match: When set to true, treats **name** as pattern. Else treats it as absolute match. Default is **false**.
    :param str expand: Include these additional details in the returned objects.
    :param str agent_status: Get only deployment targets that have this status.
    :param str agent_job_result: Get only deployment targets that have this last job result.
    :param str continuation_token: Get deployment targets with names greater than this continuationToken lexicographically.
    :param int top: Maximum number of deployment targets to return. Default is **1000**.
    :param bool enabled: Get only deployment targets that are enabled or disabled. Default is 'null' which returns all the targets.
    :param [str] property_filters:
    :rtype: [DeploymentMachine]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if deployment_group_id is not None:
        route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
    query_parameters = {}
    if tags is not None:
        tags = ",".join(tags)
        query_parameters['tags'] = self._serialize.query('tags', tags, 'str')
    if name is not None:
        query_parameters['name'] = self._serialize.query('name', name, 'str')
    if partial_name_match is not None:
        query_parameters['partialNameMatch'] = self._serialize.query('partial_name_match', partial_name_match, 'bool')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
    if agent_status is not None:
        query_parameters['agentStatus'] = self._serialize.query('agent_status', agent_status, 'str')
    if agent_job_result is not None:
        query_parameters['agentJobResult'] = self._serialize.query('agent_job_result', agent_job_result, 'str')
    if continuation_token is not None:
        query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
    if top is not None:
        query_parameters['$top'] = self._serialize.query('top', top, 'int')
    if enabled is not None:
        query_parameters['enabled'] = self._serialize.query('enabled', enabled, 'bool')
    if property_filters is not None:
        property_filters = ",".join(property_filters)
        query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
    response = self._send(http_method='GET',
                          location_id='2f0aa599-c121-4256-a5fd-ba370e0ae7b6',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[DeploymentMachine]', self._unwrap_collection(response))
def get_tables(self):
    """Add tables to the network.

    Example
        >>> writer = UAIWriter(model)
        >>> writer.get_tables()
    """
    if isinstance(self.model, BayesianModel):
        cpds = self.model.get_cpds()
        cpds.sort(key=lambda x: x.variable)
        tables = []
        for cpd in cpds:
            values = list(map(str, cpd.values.ravel()))
            tables.append(values)
        return tables
    elif isinstance(self.model, MarkovModel):
        factors = self.model.get_factors()
        tables = []
        for factor in factors:
            values = list(map(str, factor.values.ravel()))
            tables.append(values)
        return tables
    else:
        raise TypeError("Model must be an instance of Markov or Bayesian model.")
def convert_dms_string_to_dd(dms):
    """Convert a degrees, minutes, and seconds (DMS) string representation,
    such as 38°53'23" N, to decimal degrees (DD), which expresses latitude
    and longitude geographic coordinates as decimal fractions.

    @param dms: degrees, minutes, and seconds (DMS) string representation of
        a location on the Earth.

    @return: decimal degrees of the geographic coordinates of a location.
    """
    degree_mark_offset = dms.find(u'°')
    degrees = float(dms[:degree_mark_offset].strip())
    minute_mark_offset = dms.find(u"'")
    minutes = float(dms[degree_mark_offset + 1:minute_mark_offset].strip())
    second_mark_offset = dms.find(u'"')
    seconds = float(dms[minute_mark_offset + 1:second_mark_offset].strip())
    return degrees + (minutes / 60) + (seconds / 3600)
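Worked example with the coordinate from the docstring: 38 + 53/60 + 23/3600 ≈ 38.8897.

print(round(convert_dms_string_to_dd(u'38°53\'23" N'), 4))  # 38.8897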
def prepare(self, configuration_folder, args_dict, environment):
    """Make a temporary configuration file from the files in our folder."""
    self.configuration_folder = configuration_folder
    if not os.path.isdir(configuration_folder):
        raise BadOption("Specified configuration folder is not a directory!", wanted=configuration_folder)

    available = [os.path.join(configuration_folder, name) for name in os.listdir(configuration_folder)]
    available_environments = [os.path.basename(path) for path in available if os.path.isdir(path)]
    available_environments = [e for e in available_environments if not e.startswith('.')]

    # Make sure the environment exists
    if environment and environment not in available_environments:
        raise BadOption("Specified environment doesn't exist", available=available_environments, wanted=environment)

    if environment:
        environment_files = [os.path.join(configuration_folder, "accounts.yaml")]
        for root, dirs, files in os.walk(os.path.join(configuration_folder, environment)):
            environment_files.extend(os.path.join(root, filename) for filename in files)

        with tempfile.NamedTemporaryFile() as fle:
            contents = json.dumps({"includes": environment_files})
            fle.write(contents.encode('utf-8'))
            fle.flush()
            args_dict['aws_syncr']['environment'] = os.path.split(environment)[-1]
            super(Collector, self).prepare(fle.name, args_dict)
def _apply_BCs(self):
    r"""Applies all the boundary conditions that have been specified, by
    adding values to the *A* and *b* matrices.
    """
    if 'pore.bc_rate' in self.keys():
        # Update b
        ind = np.isfinite(self['pore.bc_rate'])
        self.b[ind] = self['pore.bc_rate'][ind]
    if 'pore.bc_value' in self.keys():
        f = np.abs(self.A.data).mean()
        # Update b (impose bc values)
        ind = np.isfinite(self['pore.bc_value'])
        self.b[ind] = self['pore.bc_value'][ind] * f
        # Update b (subtract quantities from b to keep A symmetric)
        x_BC = np.zeros(self.b.shape)
        x_BC[ind] = self['pore.bc_value'][ind]
        self.b[~ind] -= (self.A.tocsr() * x_BC)[~ind]
        # Update A
        P_bc = self.toindices(ind)
        indrow = np.isin(self.A.row, P_bc)
        indcol = np.isin(self.A.col, P_bc)
        self.A.data[indrow] = 0  # Remove entries from A for all BC rows
        self.A.data[indcol] = 0  # Remove entries from A for all BC cols
        datadiag = self.A.diagonal()  # Add diagonal entries back into A
        datadiag[P_bc] = np.ones_like(P_bc, dtype=np.float64) * f
        self.A.setdiag(datadiag)
        self.A.eliminate_zeros()
def load_into_collections_from_zipfile(collections, zipfile):
    """Loads resources contained in the given ZIP archive into each of the given collections.

    The ZIP file is expected to contain a list of file names obtained with
    the :func:`get_collection_filename` function, each pointing to a file of
    zipped collection resource data.

    :param collections: sequence of collection resources
    :param str zipfile: ZIP file name
    """
    with ZipFile(zipfile) as zipf:
        names = zipf.namelist()
        name_map = dict([(os.path.splitext(name)[0], index)
                         for (index, name) in enumerate(names)])
        for coll in collections:
            coll_name = get_collection_name(coll)
            index = name_map.get(coll_name)
            if index is None:
                continue
            coll_fn = names[index]
            ext = os.path.splitext(coll_fn)[1]
            try:
                content_type = MimeTypeRegistry.get_type_for_extension(ext)
            except KeyError:
                raise ValueError('Could not infer MIME type for file '
                                 'extension "%s".' % ext)
            # Strings are always written as UTF-8 encoded byte strings when
            # the zip file is created, so we have to wrap the iterator into
            # a decoding step.
            coll_data = DecodingStream(zipf.open(coll_fn, 'r'))
            load_into_collection_from_stream(coll, coll_data, content_type)
def to_frame(self, data, state):
    """Extract a single frame from the data buffer.

    The consumed data should be removed from the buffer. If no complete frame
    can be read, must raise a ``NoFrames`` exception.

    :param data: A ``bytearray`` instance containing the data so far read.
    :param state: An instance of ``FramerState``. If the buffer contains a
        partial frame, this object can be used to store state information to
        allow the remainder of the frame to be read.

    :returns: A frame. The frame may be any object. The stock framers always
        return bytes.
    """
    # Convert the data to bytes
    frame = six.binary_type(data)

    # Clear the buffer
    del data[:]

    # Return the frame
    return frame
def Handle(self, args, token=None):
    """Renders specified config option."""
    if not args.name:
        raise ValueError("Name not specified.")
    return ApiConfigOption().InitFromConfigOption(args.name)
def remove_parameters_all(self, twig=None, **kwargs):
    """Remove all :class:`Parameter`s that match the search from the ParameterSet.

    Any Parameter that would be included in the resulting ParameterSet from a
    :func:`filter` call with the same arguments will be removed from this
    ParameterSet.

    Note: removing Parameters from a ParameterSet will not remove them from
    any parent ParameterSets (including the
    :class:`phoebe.frontend.bundle.Bundle`)

    :parameter str twig: the twig to search for the parameter
    :parameter **kwargs: meta-tags to search
    """
    params = self.filter(twig=twig, check_visible=False, check_default=False, **kwargs)
    for param in params.to_list():
        self._remove_parameter(param)
def viewport(value):
    """2-element list of ints: Dimensions of the viewport.

    The viewport is a bounding box containing the visualization. If the
    dimensions of the visualization are larger than the viewport, then the
    visualization will be scrollable.

    If undefined, then the full visualization is shown.
    """
    if len(value) != 2:
        raise ValueError('viewport must have 2 dimensions')
    for v in value:
        _assert_is_type('viewport dimension', v, int)
        if v < 0:
            raise ValueError('viewport dimensions cannot be negative')
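Sketch of the validator's behaviour with hypothetical values (assumes the _assert_is_type helper used above is in scope):

viewport([800, 600])          # passes silently
try:
    viewport([800, -1])
except ValueError as exc:
    print(exc)                # viewport dimensions cannot be negative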
def create(self, stream, start, parameters, sources, end=None):
    """Create a historics preview job.

    Uses API documented at
    http://dev.datasift.com/docs/api/rest-api/endpoints/previewcreate

    :param stream: hash of the CSDL filter to create the job for
    :type stream: str
    :param start: Unix timestamp for the start of the period
    :type start: int
    :param parameters: list of historics preview parameters, can be found at
        http://dev.datasift.com/docs/api/rest-api/endpoints/previewcreate
    :type parameters: list
    :param sources: list of sources to include, eg. ['tumblr', 'facebook']
    :type sources: list
    :param end: (optional) Unix timestamp for the end of the period, defaults
        to min(start + 24h, now - 1h)
    :type end: int
    :return: dict of REST API output with headers attached
    :rtype: :class:`~datasift.request.DictResponse`
    :raises: :class:`~datasift.exceptions.HistoricSourcesRequired`,
        :class:`~datasift.exceptions.DataSiftApiException`,
        :class:`requests.exceptions.HTTPError`
    """
    if len(sources) == 0:
        raise HistoricSourcesRequired()
    if isinstance(sources, six.string_types):
        sources = [sources]
    params = {'hash': stream,
              'start': start,
              'sources': ','.join(sources),
              'parameters': ','.join(parameters)}
    if end:
        params['end'] = end
    return self.request.post('create', params)
def normalize_pipeline_name ( name = '' ) : """Translate unsafe characters to underscores ."""
normalized_name = name for bad in '\\/?%#' : normalized_name = normalized_name . replace ( bad , '_' ) return normalized_name
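A worked example of the character translation on an invented pipeline name:

# Invented name containing several of the unsafe characters.
name = "team/app%canary#v2"
for bad in '\\/?%#':
    name = name.replace(bad, '_')
print(name)   # team_app_canary_v2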
def write_hector_input ( scenario , path = None ) : """Writes a scenario DataFrame to a CSV emissions file as used in Hector . Parameters scenario : DataFrame DataFrame with emissions . path : file - like object or path Returns out : str If no path is given a String of the output is returned ."""
# Output header format : # ; Scenario name # ; Generated with pyhector # ; UNITS : GtC / yr GtC / yr [ . . . ] # Date ffi _ emissions luc _ emissions [ . . . ] out = "" try : name = "; " + scenario . name + "\n" except AttributeError : name = "; Hector Scenario\n" out += name out += "; Written with pyhector\n" unit_names = [ units [ source ] for source in scenario . columns ] out += ";UNITS:," + "," . join ( unit_names ) + "\n" out += scenario . to_csv ( ) if isinstance ( path , str ) : f = open ( path , "w" ) elif path is None : return out else : f = path f . write ( out ) if hasattr ( f , "close" ) : f . close ( ) return None
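A hedged usage sketch, assuming write_hector_input and the module-level units mapping are importable from pyhector and that the index is the Date column; the emission numbers below are made up:

import pandas as pd
from pyhector import write_hector_input   # assumption: exposed at package level

# Made-up two-year scenario with two emissions columns known to pyhector.
scenario = pd.DataFrame(
    {"ffi_emissions": [9.0, 9.5], "luc_emissions": [1.1, 1.0]},
    index=pd.Index([2020, 2021], name="Date"),
)
scenario.name = "Example Scenario"
print(write_hector_input(scenario))       # returns the CSV text when path is None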
def from_dict ( posterior = None , * , posterior_predictive = None , sample_stats = None , prior = None , prior_predictive = None , sample_stats_prior = None , observed_data = None , coords = None , dims = None ) : """Convert Dictionary data into an InferenceData object . Parameters posterior : dict posterior _ predictive : dict sample _ stats : dict " log _ likelihood " variable for stats needs to be here . prior : dict prior _ predictive : dict observed _ data : dict coords : dict [ str , iterable ] A dictionary containing the values that are used as index . The key is the name of the dimension , the values are the index values . dims : dict [ str , List ( str ) ] A mapping from variables to a list of coordinate names for the variable . Returns InferenceData object"""
return DictConverter ( posterior = posterior , posterior_predictive = posterior_predictive , sample_stats = sample_stats , prior = prior , prior_predictive = prior_predictive , sample_stats_prior = sample_stats_prior , observed_data = observed_data , coords = coords , dims = dims , ) . to_inference_data ( )
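A usage sketch, assuming this converter backs the public arviz.from_dict entry point and that values are arrays shaped (chain, draw, ...); the variable names and data are invented:

import numpy as np
import arviz as az   # assumption: the converter above is exposed as az.from_dict

posterior = {"mu": np.random.randn(4, 500)}            # (chain, draw)
observed_data = {"y": np.random.randn(8)}
idata = az.from_dict(posterior=posterior,
                     observed_data=observed_data,
                     coords={"obs_id": np.arange(8)},
                     dims={"y": ["obs_id"]})
print(idata)   # InferenceData with posterior and observed_data groups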
def save ( self ) : """Saves dictionary to disk in JSON format ."""
if self . filename is None : raise StoreException ( "Filename must be set to write store to disk" ) # We need an atomic way of re - writing the settings , we also need to # prevent only overwriting part of the settings file ( see bug # 116 ) . # Create a temp file and only then re - name it to the config filename = "{filename}.{date}.tmp" . format ( filename = self . filename , date = datetime . datetime . utcnow ( ) . strftime ( '%Y-%m-%dT%H_%M_%S.%f' ) ) # The ` open ` built - in doesn ' t allow us to set the mode mode = stat . S_IRUSR | stat . S_IWUSR # 0600 fd = os . open ( filename , os . O_WRONLY | os . O_CREAT , mode ) fout = os . fdopen ( fd , "w" ) fout . write ( json . dumps ( self . export ( ) ) ) fout . close ( ) # Now we should remove the old config if os . path . isfile ( self . filename ) : os . remove ( self . filename ) # Now rename the temp file to the real config file os . rename ( filename , self . filename )
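The write-temp-then-rename idea in isolation, as a minimal standard-library sketch (the file name and payload are throwaway examples); os.replace is used here so the swap itself is a single atomic step, whereas the method above removes the old file before renaming:

import datetime
import json
import os
import stat

def atomic_json_write(filename, data):
    # Write to a uniquely named 0600 temp file, then rename it over the
    # target so readers never observe a partially written file.
    tmp = "{0}.{1}.tmp".format(
        filename, datetime.datetime.utcnow().strftime('%Y-%m-%dT%H_%M_%S.%f'))
    fd = os.open(tmp, os.O_WRONLY | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
    with os.fdopen(fd, "w") as fout:
        fout.write(json.dumps(data))
    os.replace(tmp, filename)   # atomically replaces any existing file

atomic_json_write("settings.json", {"theme": "dark"})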
def revoke_tokens ( self ) : """Revoke the authorization token and all tokens that were generated using it ."""
self . is_active = False self . save ( ) self . refresh_token . revoke_tokens ( )
def _process_bracket ( self , trial_runner , bracket , trial ) : """This is called whenever a trial makes progress . When all live trials in the bracket have no more iterations left , Trials will be successively halved . If bracket is done , all non - running trials will be stopped and cleaned up , and during each halving phase , bad trials will be stopped while good trials will return to " PENDING " ."""
action = TrialScheduler . PAUSE if bracket . cur_iter_done ( ) : if bracket . finished ( ) : bracket . cleanup_full ( trial_runner ) return TrialScheduler . STOP good , bad = bracket . successive_halving ( self . _reward_attr ) # kill bad trials self . _num_stopped += len ( bad ) for t in bad : if t . status == Trial . PAUSED : trial_runner . stop_trial ( t ) elif t . status == Trial . RUNNING : bracket . cleanup_trial ( t ) action = TrialScheduler . STOP else : raise Exception ( "Trial with unexpected status encountered" ) # ready the good trials - if trial is too far ahead , don ' t continue for t in good : if t . status not in [ Trial . PAUSED , Trial . RUNNING ] : raise Exception ( "Trial with unexpected status encountered" ) if bracket . continue_trial ( t ) : if t . status == Trial . PAUSED : trial_runner . trial_executor . unpause_trial ( t ) elif t . status == Trial . RUNNING : action = TrialScheduler . CONTINUE return action
def get_uids ( self , filename = None ) : """Return a list of UIDs . filename - - unused , for API compatibility only"""
self . _update ( ) return [ Abook . _gen_uid ( self . _book [ entry ] ) for entry in self . _book . sections ( ) ]
def remove_description_by_language ( self , language_type ) : """Removes the specified description . raise : NoAccess - ` ` Metadata . isRequired ( ) ` ` is ` ` true ` ` or ` ` Metadata . isReadOnly ( ) ` ` is ` ` true ` ` * compliance : mandatory - - This method must be implemented . *"""
if self . get_descriptions_metadata ( ) . is_read_only ( ) : raise NoAccess ( ) if not isinstance ( language_type , Type ) : raise InvalidArgument ( 'language_type must be instance of Type' ) self . my_osid_object_form . _my_map [ 'descriptions' ] = [ t for t in self . my_osid_object_form . _my_map [ 'descriptions' ] if t [ 'languageTypeId' ] != str ( language_type ) ]
def get_normalized_term ( term_id : str , equivalents : list , namespace_targets : dict ) -> str : """Get normalized term"""
if equivalents and len ( equivalents ) > 0 : for start_ns in namespace_targets : if re . match ( start_ns , term_id ) : for target_ns in namespace_targets [ start_ns ] : for e in equivalents : if e and target_ns in e [ "namespace" ] and e [ "primary" ] : normalized_term = e [ "term_id" ] return normalized_term return term_id
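A worked call with invented identifiers, to show the expected shapes of equivalents and namespace_targets (assuming the function above is in scope; these are not real namespace mappings):

namespace_targets = {"HGNC": ["EG", "SP"]}    # source prefix -> preferred target namespaces
equivalents = [
    {"term_id": "EG:207", "namespace": "EG", "primary": True},
    {"term_id": "SP:P31749", "namespace": "SP", "primary": False},
]
print(get_normalized_term("HGNC:391", equivalents, namespace_targets))    # EG:207
print(get_normalized_term("GO:0008150", equivalents, namespace_targets))  # GO:0008150 (no match, returned unchanged)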
def tm_header ( filename , ppdesc ) : """Parse the TM abinit header . Example : Troullier - Martins psp for element Fm Thu Oct 27 17:28:39 EDT 1994 100.00000 14.00000 940714 zatom , zion , pspdat 1 1 3 0 2001 . 00000 pspcod , pspxc , lmax , lloc , mmax , r2well 0 4.085 6.246 0 2.8786493 l , e99.0 , e99.9 , nproj , rcpsp .00000 . 00000 . 00000 . 00000 rms , ekb1 , ekb2 , epsatm 1 3.116 4.632 1 3.4291849 l , e99.0 , e99.9 , nproj , rcpsp .00000 . 00000 . 00000 . 00000 rms , ekb1 , ekb2 , epsatm 2 4.557 6.308 1 2.1865358 l , e99.0 , e99.9 , nproj , rcpsp .00000 . 00000 . 00000 . 00000 rms , ekb1 , ekb2 , epsatm 3 23.251 29.387 1 2.4776730 l , e99.0 , e99.9 , nproj , rcpsp .00000 . 00000 . 00000 . 00000 rms , ekb1 , ekb2 , epsatm 3.62474762267880 . 07409391739104 3.07937699839200 rchrg , fchrg , qchrg"""
lines = _read_nlines ( filename , - 1 ) header = [ ] for lineno , line in enumerate ( lines ) : header . append ( line ) if lineno == 2 : # Read lmax . tokens = line . split ( ) pspcod , pspxc , lmax , lloc = map ( int , tokens [ : 4 ] ) mmax , r2well = map ( float , tokens [ 4 : 6 ] ) # if tokens [ - 1 ] . strip ( ) ! = " pspcod , pspxc , lmax , lloc , mmax , r2well " : # raise RuntimeError ( " % s : Invalid line \ n % s " % ( filename , line ) ) lines = lines [ 3 : ] break # TODO # Parse the section with the projectors . # 0 4.085 6.246 0 2.8786493 l , e99.0 , e99.9 , nproj , rcpsp # .00000 . 00000 . 00000 . 00000 rms , ekb1 , ekb2 , epsatm projectors = OrderedDict ( ) for idx in range ( 2 * ( lmax + 1 ) ) : line = lines [ idx ] if idx % 2 == 0 : proj_info = [ line , ] if idx % 2 == 1 : proj_info . append ( line ) d = _dict_from_lines ( proj_info , [ 5 , 4 ] ) projectors [ int ( d [ "l" ] ) ] = d # Add the last line with info on nlcc . header . append ( lines [ idx + 1 ] ) summary = header [ 0 ] header = _dict_from_lines ( header , [ 0 , 3 , 6 , 3 ] ) return NcAbinitHeader ( summary , ** header )
def register ( self , model_alias , code = 'general' , name = None , order = None , display_filter = None ) : """Register a new tab : param model _ alias : : param code : : param name : : param order : : param display _ filter : : return :"""
model_alias = self . get_model_alias ( model_alias ) def wrapper ( create_layout ) : item = TabItem ( code = code , create_layout = create_layout , name = name , order = order , display_filter = display_filter ) if item in self . tabs [ model_alias ] : raise Exception ( "Tab {} already registered for model {}" . format ( code , model_alias ) ) self . tabs [ model_alias ] . append ( item ) self . tabs [ model_alias ] = sorted ( self . tabs [ model_alias ] , key = lambda item : item . order if item . order else 999 ) return create_layout return wrapper
def get_unbound_arg_names ( arg_names , arg_binding_keys ) : """Determines which args have no arg binding keys . Args : arg _ names : a sequence of the names of possibly bound args arg _ binding _ keys : a sequence of ArgBindingKey each of whose arg names is in arg _ names Returns : a sequence of arg names that is a ( possibly empty , possibly non - proper ) subset of arg _ names"""
bound_arg_names = [ abk . _arg_name for abk in arg_binding_keys ] return [ arg_name for arg_name in arg_names if arg_name not in bound_arg_names ]
def extract_war_version ( war ) : '''Extract the version from the war file name . There does not seem to be a standard for encoding the version into the ` war file name ` _ . . _ ` war file name ` : https : / / tomcat . apache . org / tomcat - 6.0 - doc / deployer - howto . html Examples : . . code - block : : bash / path / salt - 2015.8.6 . war - > 2015.8.6 / path / V6R2013xD5 . war - > None'''
basename = os . path . basename ( war ) war_package = os . path . splitext ( basename ) [ 0 ] # remove ' . war ' version = re . findall ( "-([\\d.-]+)$" , war_package ) # try semver return version [ 0 ] if version and len ( version ) == 1 else None
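Worked examples of the regex behaviour from the docstring, with the logic repeated inline so the snippet runs on its own (paths are made up):

import os
import re

def _war_version(war):
    # Same extraction as above, duplicated here for a self-contained example.
    war_package = os.path.splitext(os.path.basename(war))[0]
    version = re.findall(r"-([\d.-]+)$", war_package)
    return version[0] if version and len(version) == 1 else None

print(_war_version("/path/salt-2015.8.6.war"))   # 2015.8.6
print(_war_version("/path/V6R2013xD5.war"))      # None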
def get_tags ( self ) : """Returns a sorted list of sets of tags ."""
return sorted ( [ frozenset ( meta_graph . meta_info_def . tags ) for meta_graph in self . meta_graphs ] )
def transform ( row , table ) : 'Extract links from " project " field and remove HTML from all'
data = row . _asdict ( ) data [ "links" ] = " " . join ( extract_links ( row . project ) ) for key , value in data . items ( ) : if isinstance ( value , six . text_type ) : data [ key ] = extract_text ( value ) return data
def computeNoCall ( fileName ) : """Computes the number of no calls . : param fileName : the name of the file : type fileName : str Reads the ` ` ped ` ` file created by Plink using the ` ` recodeA ` ` options ( see : py : func : ` createPedChr24UsingPlink ` ) and computes the number and percentage of no calls on chromosome ` ` 24 ` ` ."""
outputFile = None try : outputFile = open ( fileName + ".noCall" , "w" ) except IOError : msg = "%s: can't write file" % fileName + ".noCall" raise ProgramError ( msg ) print >> outputFile , "\t" . join ( [ "PED" , "ID" , "SEX" , "nbGeno" , "nbNoCall" ] ) try : toPrint = [ ] with open ( fileName , "r" ) as inputFile : for i , line in enumerate ( inputFile ) : row = line . rstrip ( "\r\n" ) . split ( " " ) if i != 0 : # This is data genotypes = np . array ( row [ 6 : ] ) nbMarker = len ( genotypes ) nbNA = len ( np . where ( genotypes == "NA" ) [ 0 ] ) toPrint . append ( ( row [ 0 ] , row [ 1 ] , row [ 4 ] , str ( nbMarker ) , str ( nbNA ) ) ) toPrint . sort ( reverse = True , key = lambda values : int ( values [ 4 ] ) ) for row in toPrint : print >> outputFile , "\t" . join ( row ) except IOError : msg = "%(fileName)s: no such file" % locals ( ) raise ProgramError ( msg ) # Closing the output file outputFile . close ( )
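The counting step at the heart of this function can be exercised on its own; a minimal sketch with a made-up recoded genotype row (the file handling and Python 2 printing above are omitted):

import numpy as np

# Values after the six pedigree columns of one recodeA row; "NA" marks a no call.
genotypes = np.array(["0", "NA", "2", "NA", "1"])
nbMarker = len(genotypes)
nbNA = len(np.where(genotypes == "NA")[0])
print(nbMarker, nbNA)   # 5 2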
async def enter_async_context ( self , cm ) : """Enters the supplied async context manager . If successful , also pushes its _ _ aexit _ _ method as a callback and returns the result of the _ _ aenter _ _ method ."""
_cm_type = type ( cm ) _exit = _cm_type . __aexit__ result = await _cm_type . __aenter__ ( cm ) self . _push_async_cm_exit ( cm , _exit ) return result
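A usage sketch, assuming the surrounding object behaves like contextlib.AsyncExitStack (used here as a stand-in); the context manager and names are invented for illustration:

import asyncio
import contextlib

@contextlib.asynccontextmanager
async def connection(name):
    print("open", name)
    try:
        yield name
    finally:
        print("close", name)

async def main():
    # AsyncExitStack.enter_async_context follows the same __aenter__/__aexit__ protocol.
    async with contextlib.AsyncExitStack() as stack:
        conn = await stack.enter_async_context(connection("db"))
        print("using", conn)

asyncio.run(main())   # open db / using db / close db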
def populate_metadata ( model , MetadataClass ) : """For a given model and metadata class , ensure there is metadata for every instance ."""
for instance in model . objects . all ( ) : create_metadata_instance ( MetadataClass , instance )
def add_user ( self , user , first_name = None , last_name = None , email = None , password = None ) : """Add a new user . Args : user ( string ) : User name . first _ name ( optional [ string ] ) : User ' s first name . Defaults to None . last _ name ( optional [ string ] ) : User ' s last name . Defaults to None . email : ( optional [ string ] ) : User ' s email address . Defaults to None . password : ( optional [ string ] ) : User ' s password . Defaults to None . Raises : requests . HTTPError on failure ."""
self . project_service . set_auth ( self . _token_project ) self . project_service . add_user ( user , first_name , last_name , email , password )
def read ( section : str = 'DEFAULT' ) : """reads the ~ / . datadog . ini ` section ` with the following allowed properties : param section identifying a specific datadog account api _ key : Datadog API key type api _ key : string app _ key : Datadog application key type app _ key : string proxies : Proxy to use to connect to Datadog API type proxies : dictionary mapping protocol to the URL of the proxy . api _ host : Datadog API endpoint type api _ host : url statsd _ host : Host of DogStatsd server or statsd daemon type statsd _ host : address statsd _ port : Port of DogStatsd server or statsd daemon type statsd _ port : port statsd _ use _ default _ route : Dynamically set the statsd host to the default route ( Useful when running the client in a container ) type statsd _ use _ default _ route : boolean statsd _ socket _ path : path to the DogStatsd UNIX socket . Supersedes statsd _ host and stats _ port if provided . cacert : Path to local certificate file used to verify SSL certificates . Can also be set to True ( default ) to use the systems certificate store , or False to skip SSL verification type cacert : path or boolean mute : Mute any ApiError or ClientError before they escape from datadog . api . HTTPClient ( default : True ) . type mute : boolean"""
parser = ConfigParser ( ) parser . read ( path . expanduser ( '~/.datadog.ini' ) ) return { k : v for ( k , v ) in parser . items ( section ) if k in allowed_properties }
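For illustration, the same section-and-filter logic applied to placeholder contents standing in for ~/.datadog.ini (the allowed_properties set here is an invented subset of the keys listed above):

from configparser import ConfigParser

sample = """
[DEFAULT]
api_key = 0123456789abcdef
app_key = fedcba9876543210
unknown_key = ignored
"""
allowed_properties = {"api_key", "app_key", "api_host", "statsd_host", "statsd_port"}

parser = ConfigParser()
parser.read_string(sample)
print({k: v for (k, v) in parser.items("DEFAULT") if k in allowed_properties})
# {'api_key': '0123456789abcdef', 'app_key': 'fedcba9876543210'}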
def find_by ( self , ** kwargs ) : """Find first record subject to restrictions in + kwargs + , raising RecordNotFound if no such record exists ."""
result = self . where ( ** kwargs ) . first ( ) if result : return result else : raise RecordNotFound ( kwargs )
def get_mod_func ( class_string ) : """Converts ' django . views . news . stories . story _ detail ' to ( ' django . views . news . stories ' , ' story _ detail ' ) Taken from django . core . urlresolvers"""
try : dot = class_string . rindex ( '.' ) except ValueError : return class_string , '' return class_string [ : dot ] , class_string [ dot + 1 : ]
def first_time_setup ( self ) : """First time running Open Sesame ? Create keyring and an auto - unlock key in default keyring . Make sure these things don ' t already exist ."""
if not self . _auto_unlock_key_position ( ) : pw = password . create_passwords ( ) [ 0 ] attrs = { 'application' : self . keyring } gkr . item_create_sync ( self . default_keyring , gkr . ITEM_GENERIC_SECRET , self . keyring , attrs , pw , True ) found_pos = self . _auto_unlock_key_position ( ) item_info = gkr . item_get_info_sync ( self . default_keyring , found_pos ) gkr . create_sync ( self . keyring , item_info . get_secret ( ) )
def cmdvel2Twist ( vel ) : '''Translates from JderobotTypes CMDVel to a ROS TwistStamped . @ param vel : JderobotTypes CMDVel to translate @ type vel : JderobotTypes . CMDVel @ return a TwistStamped translated from vel'''
tw = TwistStamped ( ) tw . twist . linear . x = vel . vx tw . twist . linear . y = vel . vy tw . twist . linear . z = vel . vz tw . twist . angular . x = vel . ax tw . twist . angular . y = vel . ay tw . twist . angular . z = vel . az return tw
def wait_for_all_futures ( futures , print_traceback = False ) : """Wait indefinitely for all futures in the input iterable to complete . Use a timeout to enable interrupt handling . Call os . _ exit ( ) in case of KeyboardInterrupt . Otherwise , the atexit registered handler in concurrent . futures . thread will run , and issue blocking join ( ) on all worker threads , requiring us to listen to events in worker threads in order to enable timely exit in response to Ctrl - C . Note : This still doesn ' t handle situations where Ctrl - C is pressed elsewhere in the code and there are worker threads with long - running tasks . Note : os . _ exit ( ) doesn ' t work well with interactive mode ( e . g . ipython ) . This may help : import _ _ main _ _ as main ; if hasattr ( main , ' _ _ file _ _ ' ) : os . _ exit ( ) else : sys . exit ( )"""
try : while True : waited_futures = concurrent . futures . wait ( futures , timeout = 60 ) if len ( waited_futures . not_done ) == 0 : break except KeyboardInterrupt : if print_traceback : traceback . print_stack ( ) else : print ( '' ) os . _exit ( os . EX_IOERR )
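A usage sketch with a thread pool, assuming the function above is in scope; the sleeping tasks are made up, and the interesting case is that Ctrl-C during the wait takes the os._exit path:

import concurrent.futures
import time

def work(i):
    time.sleep(0.1)
    return i * i

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(work, i) for i in range(8)]
    wait_for_all_futures(futures)    # blocks until every future completes
    print(sorted(f.result() for f in futures))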
def extract ( self , member , path = "" , set_attrs = True ) : """Extract a member from the archive to the current working directory , using its full name . Its file information is extracted as accurately as possible . ` member ' may be a filename or a TarInfo object . You can specify a different directory using ` path ' . File attributes ( owner , mtime , mode ) are set unless ` set _ attrs ' is False ."""
self . _check ( "r" ) if isinstance ( member , str ) : tarinfo = self . getmember ( member ) else : tarinfo = member # Prepare the link target for makelink ( ) . if tarinfo . islnk ( ) : tarinfo . _link_target = os . path . join ( path , tarinfo . linkname ) try : self . _extract_member ( tarinfo , os . path . join ( path , tarinfo . name ) , set_attrs = set_attrs ) except EnvironmentError as e : if self . errorlevel > 0 : raise else : if e . filename is None : self . _dbg ( 1 , "tarfile: %s" % e . strerror ) else : self . _dbg ( 1 , "tarfile: %s %r" % ( e . strerror , e . filename ) ) except ExtractError as e : if self . errorlevel > 1 : raise else : self . _dbg ( 1 , "tarfile: %s" % e )
def _get_unapplied_migrations ( self , loader ) : """Output a list of unapplied migrations in the form [ [ ' migration1 ' , migration2 ' ] , . . . ] . This implementation is mostly copied from the Django ' showmigrations ' mgmt command . https : / / github . com / django / django / blob / stable / 1.8 . x / django / core / management / commands / showmigrations . py This should only be called from _ get _ current _ migration _ state ( ) ."""
unapplied = [ ] graph = loader . graph plan = [ ] seen = set ( ) # Generate the plan , in the order that migrations have been / should be applied . for target in graph . leaf_nodes ( ) : for migration in graph . forwards_plan ( target ) : if migration not in seen : plan . append ( graph . nodes [ migration ] ) seen . add ( migration ) # Remove the migrations that have already been applied . for migration in plan : if not ( migration . app_label , migration . name ) in loader . applied_migrations : # NOTE : Unicode Django application names are unsupported . unapplied . append ( [ migration . app_label , str ( migration . name ) ] ) return unapplied
def to_pinyin ( s , accented = True ) : """Convert * s * to Pinyin . If * accented * is ` ` True ` ` , diacritics are added to the Pinyin syllables . If it ' s ` ` False ` ` , numbers are used to indicate tone ."""
identity = identify ( s ) if identity == PINYIN : if _has_accented_vowels ( s ) : return s if accented else accented_to_numbered ( s ) else : return numbered_to_accented ( s ) if accented else s elif identity == ZHUYIN : return zhuyin_to_pinyin ( s , accented = accented ) elif identity == IPA : return ipa_to_pinyin ( s , accented = accented ) else : raise ValueError ( "String is not a valid Chinese transcription." )
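Illustrative calls, assuming the to_pinyin above is in scope; the inputs are numbered and accented Pinyin respectively, and the commented results show the expected kind of output rather than verified strings:

print(to_pinyin("ni3hao3"))                 # accented form, e.g. "nǐhǎo"
print(to_pinyin("nǐhǎo", accented=False))   # numbered form, e.g. "ni3hao3"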
def apply ( self , window_length , samples = True , func1d = None ) : """Runs any kind of function over a window . Args : window _ length ( int ) : the window length . Required . samples ( bool ) : window length is in samples . Use False for a window length given in metres . func1d ( function ) : a function that takes a 1D array and returns a scalar . Default : ` ` np . mean ( ) ` ` . Returns : Curve ."""
window_length /= 1 if samples else self . step if func1d is None : func1d = np . mean params = self . __dict__ . copy ( ) out = self . _rolling_window ( int ( window_length ) , func1d ) return Curve ( out , params = params )
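A standalone NumPy sketch of the same idea (a 1-D reducing function applied over a sliding window), independent of the Curve class above; the data are made up:

import numpy as np

def rolling_apply(values, window_length, func1d=np.mean):
    # Build overlapping windows of shape (n - w + 1, w), then reduce each row.
    # Requires NumPy >= 1.20 for sliding_window_view.
    windows = np.lib.stride_tricks.sliding_window_view(values, window_length)
    return np.array([func1d(w) for w in windows])

data = np.array([1.0, 2.0, 4.0, 8.0, 16.0, 32.0])
print(rolling_apply(data, 3))             # running mean over a 3-sample window
print(rolling_apply(data, 3, np.median))  # running median over the same window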