@register_bpe('fastbpe', dataclass=fastBPEConfig)
class fastBPE(object):
def __init__(self, cfg):
if (cfg.bpe_codes is None):
raise ValueError('--bpe-codes is required for --bpe=fastbpe')
codes = file_utils.cached_path(cfg.bpe_codes)
try:
import fastBPE
self.bpe = fastBPE.fastBPE(codes)
            self.bpe_symbol = '@@ '
except ImportError:
raise ImportError('Please install fastBPE with: pip install fastBPE')
def encode(self, x: str) -> str:
return self.bpe.apply([x])[0]
def decode(self, x: str) -> str:
return (x + ' ').replace(self.bpe_symbol, '').rstrip()
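
# A minimal usage sketch, assuming the fastBPE package is installed, fairseq's
# file_utils is importable, and 'codes.txt' is a hypothetical BPE-codes path.
from types import SimpleNamespace

cfg = SimpleNamespace(bpe_codes='codes.txt')
bpe = fastBPE(cfg)
encoded = bpe.encode('unbelievable')          # e.g. 'un@@ believ@@ able'
assert bpe.decode(encoded) == 'unbelievable'  # decode strips the '@@ ' markers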
|
class ConnectionState():
def __init__(self):
self.sequence_number = (- 1)
self.initialized = False
self.connected = True
def update_sequence(self, request):
if (request.sequence_number <= self.sequence_number):
return
self.sequence_number = request.sequence_number
def is_request_current(self, request):
return (request.sequence_number >= self.sequence_number)
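
# A quick sketch of the invariant: the sequence number only moves forward,
# so stale requests are ignored and reported as not current.
from types import SimpleNamespace

state = ConnectionState()
state.update_sequence(SimpleNamespace(sequence_number=5))
state.update_sequence(SimpleNamespace(sequence_number=3))  # stale; ignored
assert state.sequence_number == 5
assert state.is_request_current(SimpleNamespace(sequence_number=5))
assert not state.is_request_current(SimpleNamespace(sequence_number=4))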
|
def test_graphql(graphql_url):
schema = gql_loaders.from_url(graphql_url)
(initialized, *other, finished) = list(from_schema(schema, hypothesis_settings=hypothesis.settings(max_examples=5, deadline=None)).execute())
assert (initialized.operations_count == 4)
assert (finished.passed_count == 4)
for (event, expected) in zip(other, ['Query.getBooks', 'Query.getBooks', 'Query.getAuthors', 'Query.getAuthors']):
assert (event.verbose_name == expected)
if isinstance(event, events.AfterExecution):
for check in event.result.checks:
assert (check.example.verbose_name == expected)
|
def egg_info_for_url(url):
parts = urllib.parse.urlparse(url)
(scheme, server, path, parameters, query, fragment) = parts
base = urllib.parse.unquote(path.split('/')[(- 1)])
if ((server == 'sourceforge.net') and (base == 'download')):
base = urllib.parse.unquote(path.split('/')[(- 2)])
if ('#' in base):
(base, fragment) = base.split('#', 1)
return (base, fragment)
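
# Sketch on a hypothetical URL: the '#' fragment is split off the last path
# component (SourceForge '/download' URLs use the second-to-last instead).
import urllib.parse

assert egg_info_for_url('https://example.com/pkgs/foo-1.0.tar.gz#md5=abc') == ('foo-1.0.tar.gz', 'md5=abc')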
|
@dataclass  # assumed; the original decorator appears stripped, and the fields below are bare annotations
class Mention:
text: str
title: str
index: int
candidates: List[Candidate]
start: Optional[int] = None
end: Optional[int] = None
|
def is_devicelevel_fpga(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:
from dace.sdfg.utils import is_fpga_kernel
return (is_in_scope(sdfg, state, node, [dtypes.ScheduleType.FPGA_Device]) or ((state is not None) and is_fpga_kernel(sdfg, state)))
|
class FastTextEmbeddings(NeuralEmbeddings):
def __init__(self, model: str='cc.en.300.bin', force_download: bool=True, dir: str=None) -> None:
self.model = model
self.dir = dir
self.force_download = force_download
if (self.dir is None):
self.dir = f'{torch.hub.get_dir()}/{self.model}'
if (self.dir[(- 1)] == '/'):
self.dir = self.dir[:(- 1)]
if ((not os.path.exists(self.dir)) or self.force_download):
if (not os.path.exists(self.dir)):
os.system(f'mkdir {self.dir}')
            if ('wiki' in model):
                os.system(f'wget -P {self.dir}')  # download URL elided in the source
                os.system(f'unzip {self.dir}.zip -d {self.dir}')
                os.system(f'rm {self.dir}.zip')
            else:
                os.system(f'wget -P {self.dir}')  # download URL elided in the source
                os.system(f'gunzip {self.dir}.gz -d {self.dir}')
                os.system(f'rm {self.dir}.gz')
ft = fasttext.load_model(f'{self.dir}/{model}')
words = ft.get_words()
embeddings = torch.tensor(ft.get_input_matrix())
torch.save(words, f'{self.dir}/{model}.words.pt')
torch.save(embeddings, f'{self.dir}/{model}.embeddings.pt')
del ft
else:
try:
words = torch.load(f'{self.dir}/{model}.words.pt')
embeddings = torch.load(f'{self.dir}/{model}.embeddings.pt')
except:
raise Exception(f'Please install the {model} model first by setting force_download to True.')
self.vocabulary_dict = {word: i for (i, word) in enumerate(words)}
self.embedding_layer = torch.nn.Embedding.from_pretrained(embeddings=embeddings, freeze=True)
def __call__(self, tokens: Union[(List[str], str)]) -> Tensor:
return super().__call__(tokens)
def get_embedding(self, tokens: Union[(List[str], str)]) -> Tensor:
return self.__call__(tokens)
|
def to(partition, *args, **kwargs):
device = None
if ('device' in kwargs):
device = kwargs['device']
elif ('tensor' in kwargs):
device = kwargs['tensor'].device
if args:
if isinstance(args[0], (torch.device, int, str)):
device = args[0]
if torch.is_tensor(args[0]):
device = args[0].device
if (not (device is None)):
partition.device = torch.device(device)
return nn.Module.to(partition, *args, **kwargs)
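
# Sketch: the target device is captured from a positional device/tensor or a
# keyword before delegating to nn.Module.to (GPipe-style partition move).
import torch
from torch import nn

part = nn.Linear(2, 2)
to(part, torch.device('cpu'))
assert part.device == torch.device('cpu')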
|
@click.command(help='')
@click.option('--log-dir', type=str, help='logging directory')
@click.option('--dataset', default='coco', type=str)
@click.option('--dataset_dir', default='', type=str)
@click.option('--im-size', default=256, type=int, help='dataset resize size')
@click.option('--crop-size', default=256, type=int)
@click.option('--window-size', default=256, type=int)
@click.option('--window-stride', default=None, type=int)
@click.option('--backbone', default='vit_large_patch16_384', type=str)
@click.option('--decoder', default='mask_transformer', type=str)
@click.option('--optimizer', default='sgd', type=str)
@click.option('--scheduler', default='polynomial', type=str)
@click.option('--weight-decay', default=0.0, type=float)
@click.option('--dropout', default=0.0, type=float)
@click.option('--drop-path', default=0.1, type=float)
@click.option('--batch-size', default=None, type=int)
@click.option('--epochs', default=None, type=int)
@click.option('-lr', '--learning-rate', default=None, type=float)
@click.option('--normalization', default=None, type=str)
@click.option('--eval-freq', default=None, type=int)
@click.option('--amp/--no-amp', default=False, is_flag=True)
@click.option('--resume/--no-resume', default=True, is_flag=True)
@click.option('--local_rank', type=int)
@click.option('--only_test', type=bool, default=True)
@click.option('--add_mask', type=bool, default=True)
@click.option('--partial_finetune', type=bool, default=False)
@click.option('--add_l1_loss', type=bool, default=True)
@click.option('--l1_weight', type=float, default=10)
@click.option('--color_position', type=bool, default=True)
@click.option('--change_mask', type=bool, default=False)
@click.option('--color_as_condition', type=bool, default=False)
@click.option('--multi_scaled', type=bool, default=False)
@click.option('--downchannel', type=bool, default=False)
@click.option('--add_conv', type=bool, default=True)
@click.option('--before_classify', type=bool, default=False)
@click.option('--l1_conv', type=bool, default=True)
@click.option('--l1_linear', type=bool, default=False)
@click.option('--add_fm', type=bool, default=False)
@click.option('--fm_weight', type=float, default=1)
@click.option('--add_edge', type=bool, default=False)
@click.option('--edge_loss_weight', type=float, default=0.05)
@click.option('--mask_l_num', type=int, default=4)
@click.option('--n_blocks', type=int, default=1)
@click.option('--n_layers', type=int, default=2)
@click.option('--without_colorattn', type=bool, default=False)
@click.option('--without_colorquery', type=bool, default=False)
@click.option('--without_classification', type=bool, default=False)
@click.option('--mask_random', type=bool, default=False)
@click.option('--color_token_num', type=int, default=313)
@click.option('--sin_color_pos', type=bool, default=False)
def main(log_dir, dataset, dataset_dir, im_size, crop_size, window_size, window_stride, backbone, decoder, optimizer, scheduler, weight_decay, dropout, drop_path, batch_size, epochs, learning_rate, normalization, eval_freq, amp, resume, local_rank, only_test, add_mask, partial_finetune, add_l1_loss, l1_weight, color_position, change_mask, color_as_condition, multi_scaled, downchannel, add_conv, before_classify, l1_conv, l1_linear, add_fm, fm_weight, add_edge, edge_loss_weight, mask_l_num, n_blocks, n_layers, without_colorattn, without_colorquery, without_classification, mask_random, color_token_num, sin_color_pos):
ptu.set_gpu_mode(True, local_rank)
torch.distributed.init_process_group(backend='gloo')
cfg = config.load_config()
model_cfg = cfg['model'][backbone]
dataset_cfg = cfg['dataset'][dataset]
if ('mask_transformer' in decoder):
decoder_cfg = cfg['decoder']['mask_transformer']
else:
decoder_cfg = cfg['decoder'][decoder]
if (not im_size):
im_size = dataset_cfg['im_size']
if (not crop_size):
crop_size = dataset_cfg.get('crop_size', im_size)
if (not window_size):
window_size = dataset_cfg.get('window_size', im_size)
if (not window_stride):
window_stride = dataset_cfg.get('window_stride', im_size)
if (not dataset_dir):
dataset_dir = dataset_cfg.get('dataset_dir', None)
model_cfg['image_size'] = (crop_size, crop_size)
model_cfg['backbone'] = backbone
model_cfg['dropout'] = dropout
model_cfg['drop_path_rate'] = drop_path
decoder_cfg['name'] = decoder
model_cfg['decoder'] = decoder_cfg
world_batch_size = dataset_cfg['batch_size']
num_epochs = dataset_cfg['epochs']
lr = dataset_cfg['learning_rate']
if batch_size:
world_batch_size = batch_size
if epochs:
num_epochs = epochs
if learning_rate:
lr = learning_rate
if (eval_freq is None):
eval_freq = dataset_cfg.get('eval_freq', 1)
if normalization:
model_cfg['normalization'] = normalization
batch_size = (world_batch_size // ptu.world_size)
    variant = dict(
        world_batch_size=world_batch_size,
        version='normal',
        resume=resume,
        dataset_kwargs=dict(
            dataset=dataset, image_size=im_size, crop_size=crop_size, batch_size=batch_size,
            normalization=model_cfg['normalization'], split='train', num_workers=10,
            dataset_dir=dataset_dir, add_mask=add_mask, patch_size=model_cfg['patch_size'],
            change_mask=change_mask, multi_scaled=multi_scaled, mask_num=mask_l_num,
            mask_random=mask_random, n_cls=color_token_num),
        algorithm_kwargs=dict(batch_size=batch_size, start_epoch=0, num_epochs=num_epochs, eval_freq=eval_freq),
        optimizer_kwargs=dict(
            opt=optimizer, lr=lr, weight_decay=weight_decay, momentum=0.9, clip_grad=None,
            sched=scheduler, epochs=num_epochs, min_lr=1e-05, poly_power=0.9, poly_step_size=1),
        net_kwargs=model_cfg,
        amp=amp,
        log_dir=log_dir,
        inference_kwargs=dict(im_size=im_size, window_size=window_size, window_stride=window_stride),
    )
log_dir = Path(log_dir)
log_dir.mkdir(parents=True, exist_ok=True)
checkpoint_path = (log_dir / 'checkpoint.pth')
dataset_kwargs = variant['dataset_kwargs']
val_kwargs = dataset_kwargs.copy()
val_kwargs['split'] = 'val'
val_kwargs['batch_size'] = 1
val_loader = create_dataset(val_kwargs)
net_kwargs = variant['net_kwargs']
net_kwargs['n_cls'] = color_token_num
net_kwargs['partial_finetune'] = partial_finetune
net_kwargs['decoder']['add_l1_loss'] = add_l1_loss
net_kwargs['decoder']['color_position'] = color_position
net_kwargs['decoder']['change_mask'] = change_mask
net_kwargs['decoder']['color_as_condition'] = color_as_condition
net_kwargs['decoder']['multi_scaled'] = multi_scaled
net_kwargs['decoder']['crop_size'] = crop_size
net_kwargs['decoder']['downchannel'] = downchannel
net_kwargs['decoder']['add_conv'] = add_conv
net_kwargs['decoder']['before_classify'] = before_classify
net_kwargs['decoder']['l1_conv'] = l1_conv
net_kwargs['decoder']['l1_linear'] = l1_linear
net_kwargs['decoder']['add_edge'] = add_edge
net_kwargs['decoder']['n_blocks'] = n_blocks
net_kwargs['decoder']['n_layers'] = n_layers
net_kwargs['decoder']['without_colorattn'] = without_colorattn
net_kwargs['decoder']['without_colorquery'] = without_colorquery
net_kwargs['decoder']['without_classification'] = without_classification
net_kwargs['decoder']['sin_color_pos'] = sin_color_pos
model = create_segmenter(net_kwargs)
model.to(ptu.device)
amp_autocast = suppress
loss_scaler = None
if amp:
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
assert resume
assert checkpoint_path.exists()
print(f'Resuming training from checkpoint: {checkpoint_path}')
checkpoint = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint['model'])
if ptu.distributed:
print('Distributed:', ptu.distributed)
model = DDP(model, device_ids=[ptu.device], find_unused_parameters=True)
variant_str = yaml.dump(variant)
print(f'''Configuration:
{variant_str}''')
variant['net_kwargs'] = net_kwargs
variant['dataset_kwargs'] = dataset_kwargs
log_dir.mkdir(parents=True, exist_ok=True)
with open((log_dir / 'variant.yml'), 'w') as f:
f.write(variant_str)
start_epoch = variant['algorithm_kwargs']['start_epoch']
num_epochs = variant['algorithm_kwargs']['num_epochs']
eval_freq = variant['algorithm_kwargs']['eval_freq']
model_without_ddp = model
if hasattr(model, 'module'):
model_without_ddp = model.module
print(f'Val dataset length: {len(val_loader.dataset)}')
print(f'Encoder parameters: {num_params(model_without_ddp.encoder)}')
print(f'Decoder parameters: {num_params(model_without_ddp.decoder)}')
for epoch in range(start_epoch, num_epochs):
eval_epoch = (((epoch % eval_freq) == 0) or (epoch == (num_epochs - 1)))
if eval_epoch:
eval_logger = evaluate(epoch, model, val_loader, window_size, window_stride, amp_autocast, add_mask, add_l1_loss, l1_weight, l1_conv, l1_linear, add_fm, fm_weight, log_dir)
print(f'Stats [{epoch}]:', eval_logger, flush=True)
distributed.barrier()
    distributed.destroy_process_group()
|
def test_simple_output(simple_confusion):
assert (EXPECTED_SIMPLE_OUTPUT == format_confusion(simple_confusion))
|
def mean(*seqs: Sequence[Numeric]) -> Union[(Numeric, Sequence[Numeric])]:
singleton = (len(seqs) == 1)
means = [float(np.mean(seq)) for seq in seqs]
return (means[0] if singleton else means)
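
# Sketch (assuming the numpy/typing imports the function relies on): a single
# sequence yields a scalar mean, several yield a list of means.
import numpy as np

assert mean([1, 2, 3]) == 2.0
assert mean([1, 2, 3], [4, 6]) == [2.0, 5.0]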
|
class CrystalOfNakajimaMonomials(InfinityCrystalOfNakajimaMonomials):
def __classcall_private__(cls, cartan_type, La=None, c=None):
if (La is None):
La = cartan_type
cartan_type = La.parent().cartan_type()
cartan_type = CartanType(cartan_type)
if cartan_type.is_affine():
La = RootSystem(cartan_type).weight_lattice(extended=True)(La)
else:
La = RootSystem(cartan_type).weight_lattice()(La)
n = len(cartan_type.index_set())
c = InfinityCrystalOfNakajimaMonomials._normalize_c(c, n)
return super().__classcall__(cls, cartan_type, La, c)
def __init__(self, ct, La, c):
if ct.is_finite():
cat = ClassicalCrystals()
else:
cat = (RegularCrystals(), HighestWeightCrystals(), InfiniteEnumeratedSets())
InfinityCrystalOfNakajimaMonomials.__init__(self, ct, c, cat)
self._cartan_type = ct
self.hw = La
gen = {(i, 0): c for (i, c) in La}
self.module_generators = (self.element_class(self, gen, {}),)
def _repr_(self):
return 'Highest weight crystal of modified Nakajima monomials of Cartan type {1!s} and highest weight {0!s}'.format(self.hw, self._cartan_type)
def cardinality(self):
if (not self.cartan_type().is_finite()):
return Infinity
return super(InfinityCrystalOfNakajimaMonomials, self).cardinality()
Element = CrystalOfNakajimaMonomialsElement
|
def split_on_phrase_rgx(sentences, doc, rgx, threshold=250):
splits = []
for sent in sentences:
matches = re.findall(rgx, sent.text)
if ((len(sent.text) >= threshold) and matches):
offset = sent[0].idx
m_idxs = set()
for m in matches:
m_idxs.add((sent.text.index(m) + offset))
idxs = [sent[0].i]
idxs += [word.i for word in sent if (word.idx in m_idxs)]
idxs += [(sent[(- 1)].i + 1)]
idxs = sorted(list(set(idxs)))
for i in range((len(idxs) - 1)):
splits.append(doc[idxs[i]:idxs[(i + 1)]])
else:
splits.append(sent)
return splits
|
@DATASETS.register_module()  # registry name assumed (mmediting-style); decorator stripped in the source
class ImgInpaintingDataset(BaseDataset):
def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
super().__init__(pipeline, test_mode)
self.ann_file = str(ann_file)
self.data_prefix = str(data_prefix)
self.data_infos = self.load_annotations()
def load_annotations(self):
with open(self.ann_file, 'r') as f:
img_infos = []
for (idx, line) in enumerate(f):
line = line.strip()
_info = dict()
line_split = line.split(' ')
_info = dict(gt_img_path=Path(self.data_prefix).joinpath(line_split[0]).as_posix(), gt_img_idx=idx)
img_infos.append(_info)
return img_infos
def evaluate(self, outputs, logger=None, **kwargs):
metric_keys = outputs[0]['eval_result'].keys()
stats = {}
for key in metric_keys:
val = sum([x['eval_result'][key] for x in outputs])
val /= self.__len__()
stats[key] = val
return stats
|
@DATASET_REGISTRY.register()  # registry name assumed (PySlowFast-style); decorator stripped in the source
class Imagenet(torch.utils.data.Dataset):
def __init__(self, cfg, mode, num_retries=10):
self.num_retries = num_retries
self.cfg = cfg
self.mode = mode
self.data_path = cfg.DATA.PATH_TO_DATA_DIR
assert (mode in ['train', 'val', 'test']), "Split '{}' not supported for ImageNet".format(mode)
logger.info('Constructing ImageNet {}...'.format(mode))
if (cfg.DATA.PATH_TO_PRELOAD_IMDB == ''):
self._construct_imdb()
else:
self._load_imdb()
def _load_imdb(self):
split_path = os.path.join(self.cfg.DATA.PATH_TO_PRELOAD_IMDB, f'{self.mode}.json')
with g_pathmgr.open(split_path, 'r') as f:
data = f.read()
self._imdb = json.loads(data)
def _construct_imdb(self):
split_path = os.path.join(self.data_path, self.mode)
logger.info('{} data path: {}'.format(self.mode, split_path))
split_files = g_pathmgr.ls(split_path)
self._class_ids = sorted((f for f in split_files if re.match('^n[0-9]+$', f)))
self._class_id_cont_id = {v: i for (i, v) in enumerate(self._class_ids)}
self._imdb = []
for class_id in self._class_ids:
cont_id = self._class_id_cont_id[class_id]
im_dir = os.path.join(split_path, class_id)
for im_name in g_pathmgr.ls(im_dir):
im_path = os.path.join(im_dir, im_name)
self._imdb.append({'im_path': im_path, 'class': cont_id})
logger.info('Number of images: {}'.format(len(self._imdb)))
logger.info('Number of classes: {}'.format(len(self._class_ids)))
def load_image(self, im_path):
with g_pathmgr.open(im_path, 'rb') as f:
with Image.open(f) as im:
im = im.convert('RGB')
im = torch.from_numpy((np.array(im).astype(np.float32) / 255.0))
im = im.permute([2, 0, 1])
return im
def _prepare_im_res(self, im_path):
im = self.load_image(im_path)
(train_size, test_size) = (self.cfg.DATA.TRAIN_CROP_SIZE, self.cfg.DATA.TEST_CROP_SIZE)
if (self.mode == 'train'):
im = transform.random_sized_crop_img(im, train_size, jitter_scale=self.cfg.DATA.TRAIN_JITTER_SCALES_RELATIVE, jitter_aspect=self.cfg.DATA.TRAIN_JITTER_ASPECT_RELATIVE)
(im, _) = transform.horizontal_flip(prob=0.5, images=im)
im = transform.lighting_jitter(im, 0.1, self.cfg.DATA.TRAIN_PCA_EIGVAL, self.cfg.DATA.TRAIN_PCA_EIGVEC)
else:
(im, _) = transform.uniform_crop(im, test_size, spatial_idx=1, scale_size=train_size)
im = transform.color_normalization(im, self.cfg.DATA.MEAN, self.cfg.DATA.STD)
return im
def _prepare_im_tf(self, im_path):
with g_pathmgr.open(im_path, 'rb') as f:
with Image.open(f) as im:
im = im.convert('RGB')
(train_size, test_size) = (self.cfg.DATA.TRAIN_CROP_SIZE, self.cfg.DATA.TEST_CROP_SIZE)
if (self.mode == 'train'):
aug_transform = transforms_imagenet_train(img_size=(train_size, train_size), color_jitter=self.cfg.AUG.COLOR_JITTER, auto_augment=self.cfg.AUG.AA_TYPE, interpolation=self.cfg.AUG.INTERPOLATION, re_prob=self.cfg.AUG.RE_PROB, re_mode=self.cfg.AUG.RE_MODE, re_count=self.cfg.AUG.RE_COUNT, mean=self.cfg.DATA.MEAN, std=self.cfg.DATA.STD)
else:
t = []
size = int(((256 / 224) * test_size))
t.append(transforms_tv.Resize(size, interpolation=3))
t.append(transforms_tv.CenterCrop(test_size))
t.append(transforms_tv.ToTensor())
t.append(transforms_tv.Normalize(self.cfg.DATA.MEAN, self.cfg.DATA.STD))
aug_transform = transforms_tv.Compose(t)
im = aug_transform(im)
return im
def __load__(self, index):
try:
im_path = self._imdb[index]['im_path']
if self.cfg.AUG.ENABLE:
if ((self.mode == 'train') and (self.cfg.AUG.NUM_SAMPLE > 1)):
im = []
for _ in range(self.cfg.AUG.NUM_SAMPLE):
crop = self._prepare_im_tf(im_path)
im.append(crop)
return im
else:
im = self._prepare_im_tf(im_path)
return im
else:
im = self._prepare_im_res(im_path)
return im
except Exception:
return None
def __getitem__(self, index):
for _ in range(self.num_retries):
im = self.__load__(index)
if (im is None):
index = random.randint(0, (len(self._imdb) - 1))
else:
break
label = self._imdb[index]['class']
if isinstance(im, list):
label = [label for _ in range(len(im))]
dummy = [torch.Tensor() for _ in range(len(im))]
return (im, label, dummy, {})
else:
dummy = torch.Tensor()
return ([im], label, dummy, {})
def __len__(self):
return len(self._imdb)
|
def ctest_args(args_list):
parser = argparse.ArgumentParser(description='Compare two npz tensor files.')
parser.add_argument('npz_file', help='Reference file with fp32 data')
parser.add_argument('--calibration_table', type=str, required=True, help='calibration table of npz file')
args = parser.parse_args(args_list)
return args
|
def download(out_dir, category, set_name, tag):
    url = ''  # download URL elided in the source
if (set_name == 'test'):
out_name = 'test_lmdb.zip'
else:
out_name = '{category}_{set_name}_lmdb.zip'.format(**locals())
out_path = os.path.join(out_dir, out_name)
print(url, out_path)
cmd = ['curl', url, '-o', out_path]
print('Downloading', category, set_name, 'set')
subprocess.call(cmd)
|
@high_level_function(module='ak.str')
def replace_substring_regex(array, pattern, replacement, *, max_replacements=None, highlevel=True, behavior=None, attrs=None):
(yield (array,))
return _impl(array, pattern, replacement, max_replacements, highlevel, behavior, attrs)
|
def tokens_to_PartStaff(tokens, key_=0, start_voice=1):
tokens = concatenated_to_regular(tokens)
p = stream.PartStaff()
k = key.KeySignature(key_)
voice_id = start_voice
voice_flag = False
after_voice = False
voice_start = None
ottava_flag = False
ottava_elements = []
tokens = aggr_note_token(tokens)
for (i, t) in enumerate(tokens):
if (t == 'bar'):
if (i != 0):
p.append(m)
m = stream.Measure()
voice_id = start_voice
voice_start = None
voice_flag = False
after_voice = False
elif (t == '<voice>'):
v = stream.Voice(id=voice_id)
voice_flag = True
if (voice_start is None):
voice_start = m.duration.quarterLength
elif (t == '</voice>'):
if voice_flag:
v.makeAccidentals(useKeySignature=k)
for element in v:
element.offset += voice_start
m.append(v)
voice_id += 1
voice_flag = False
after_voice = True
elif (t.split('_')[0] in ('clef', 'key', 'time')):
if ((t[:11] == 'key_natural') and ((i + 1) < len(tokens)) and (tokens[(i + 1)].split('_')[0] == 'key')):
continue
o = single_token_to_obj(t)
if voice_flag:
v.append(o)
else:
m.append(o)
if (t.split('_')[0] == 'key'):
k = o
elif (t[:4] in ('note', 'rest')):
n = note_token_to_obj(t.split(), k)
if ottava_flag:
ottava_elements.append(n)
if voice_flag:
v.append(n)
else:
m.append(n)
if after_voice:
n.offset -= (v.quarterLength * (voice_id - 1))
p.append(m)
p.makeAccidentals()
return p
|
def load_backward(state):
new_state = collections.OrderedDict()
for (key, val) in state.items():
multi = False
if key.startswith('module.'):
multi = True
key = key[len('module.'):]
if (key == 'true_help'):
continue
if key.startswith('bert_q.'):
continue
if key.startswith('linear.'):
continue
if key.startswith('bert.'):
key = ('encoder.' + key)
if multi:
key = ('module.' + key)
new_state[key] = val
return new_state
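
# Sketch: 'bert.*' keys are remapped under 'encoder.', query-tower and linear
# head weights are dropped, and a 'module.' (DataParallel) prefix is preserved.
import collections
import torch

state = collections.OrderedDict([
    ('module.bert.layer.0.weight', torch.zeros(1)),
    ('module.linear.weight', torch.zeros(1)),
])
assert list(load_backward(state)) == ['module.encoder.bert.layer.0.weight']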
|
class FileHandler(StreamHandler):
def __init__(self, filename, mode='a', encoding=None, delay=False):
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
with tf.io.gfile.GFile(self.baseFilename, 'w') as f:
f.write('Logging \n')
def close(self):
self.acquire()
try:
try:
if self.stream:
try:
self.flush()
finally:
stream = self.stream
self.stream = None
if hasattr(stream, 'close'):
stream.close()
finally:
StreamHandler.close(self)
finally:
self.release()
def _open(self):
return tf.io.gfile.GFile(self.baseFilename, self.mode)
def emit(self, record):
if (self.stream is None):
self.stream = self._open()
StreamHandler.emit(self, record)
def __repr__(self):
level = getLevelName(self.level)
return ('<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level))
|
class ConvertToPyTorchModel(nn.Module):
def __init__(self, base_model, classify_fn_args, classify=None, normalization=None, class_sublist=None, adversarial_attack=None):
super().__init__()
if (normalization is not None):
self.input_space = normalization.input_space
self.mean = nn.Parameter(torch.tensor(normalization.mean).float().view(3, 1, 1))
self.std = nn.Parameter(torch.tensor(normalization.std).float().view(3, 1, 1))
self.base_model = base_model
self.classify_fn_args = classify_fn_args
self.classify = classify
self.class_sublist = class_sublist
self.adversarial_attack = adversarial_attack
self.normalization = normalization
def forward(self, x):
if (self.normalization is not None):
if (self.input_space == 'BGR'):
x = x.flip(1)
x = ((x - self.mean) / self.std)
if (self.classify is None):
x = self.base_model(x)
else:
kwargs = {'images': x, 'model': self.base_model}
if ('class_sublist' in self.classify_fn_args):
kwargs['class_sublist'] = self.class_sublist
if ('adversarial_attack' in self.classify_fn_args):
kwargs['adversarial_attack'] = self.adversarial_attack
x = self.classify(**kwargs)
if ((self.class_sublist is not None) and ('class_sublist' not in self.classify_fn_args)):
x = x.t()[self.class_sublist].t()
return x
|
def make_embeddings(opt, word_dict, for_encoder=True):
embedding_dim = opt.word_vec_size
word_padding_idx = word_dict.to_ind(markers.PAD)
num_word_embeddings = len(word_dict)
return Embeddings(word_vec_size=embedding_dim, position_encoding=False, dropout=opt.dropout, word_padding_idx=word_padding_idx, word_vocab_size=num_word_embeddings)
|
def get_user_detail(user_id, html):
user = person.get_detail(html, user_id)
if (user is not None):
user.uid = user_id
user.follows_num = person.get_friends(html)
user.fans_num = person.get_fans(html)
user.wb_num = person.get_status(html)
return user
|
def test_ClusterNodeSequence_getitem():
G = create_stellargraph()
nsg = ClusterNodeSequence(graph=G, clusters=[['a'], ['b'], ['c'], ['d']], node_ids=['a', 'b', 'd'])
assert (len(nsg) == 4)
for cluster in list(nsg):
print(cluster)
assert (len(cluster) == 2)
assert (len(cluster[0][0]) == 1)
assert (len(cluster[0][1]) == 1)
assert (cluster[0][2].shape == (1, 1, 1))
assert (cluster[1] is None)
assert (len(nsg.node_order) == 3)
nodes = set()
for node in nsg.node_order:
nodes.add(node)
assert (len(nodes.intersection(['a', 'b', 'd'])) == 3)
|
def proximal_policy_optimization_loss(curr_prediction, curr_onehot, old_prediction, old_onehotpred, rewards, advantage, clip_val, beta=None):
rewards_ = tf.squeeze(rewards, axis=1)
advantage_ = tf.squeeze(advantage, axis=1)
entropy = 0
r = 1
for (t, (p, onehot, old_p, old_onehot)) in enumerate(zip(curr_prediction, curr_onehot, old_prediction, old_onehotpred)):
ll_t = tf.log(tf.reduce_sum((old_onehot * p)))
ll_0 = tf.log(tf.reduce_sum((old_onehot * old_p)))
r_t = tf.exp((ll_t - ll_0))
r = (r * r_t)
entropy += (- tf.reduce_mean(tf.log(tf.reduce_sum((onehot * p), axis=1))))
surr_obj = tf.reduce_mean((tf.abs((1 / (rewards_ + 1e-08))) * tf.minimum((r * advantage_), (tf.clip_by_value(r, clip_value_min=(1 - clip_val), clip_value_max=(1 + clip_val)) * advantage_))))
if beta:
return ((- surr_obj) + (beta * (- entropy)))
else:
return (- surr_obj)
|
class DropPath(nn.Module):
def __init__(self, drop_prob=None):
super().__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
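
# For reference, a sketch of the `drop_path` helper the module calls: the
# usual stochastic-depth formulation (timm-style signature assumed).
import torch

def drop_path(x, drop_prob=0.0, training=False):
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over the remaining dims.
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # binarize
    return x.div(keep_prob) * mask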
|
@register_model_architecture('transformer_lm', 'transformer_lm_gpt3_6_7')
def transformer_lm_gpt3_6_7(args):
args.decoder_layers = safe_getattr(args, 'decoder_layers', 32)
args.decoder_embed_dim = safe_getattr(args, 'decoder_embed_dim', 4096)
args.decoder_attention_heads = safe_getattr(args, 'decoder_attention_heads', 32)
base_gpt3_architecture(args)
|
class Counter():
def __init__(self):
self.count = 0
def trigger(self, detector, info):
self.count += 1
|
class AttentionModule(AbstractMILUnit):
def add_layers(self):
self.parent_module.mil_attn_V = nn.Linear((512 * 4), 128, bias=False)
self.parent_module.mil_attn_U = nn.Linear((512 * 4), 128, bias=False)
self.parent_module.mil_attn_w = nn.Linear(128, 1, bias=False)
self.parent_module.classifier_linear = nn.Linear((512 * 4), self.parameters['num_classes'], bias=False)
def forward(self, h_crops):
(batch_size, num_crops, h_dim) = h_crops.size()
h_crops_reshape = h_crops.view((batch_size * num_crops), h_dim)
attn_projection = (torch.sigmoid(self.parent_module.mil_attn_U(h_crops_reshape)) * torch.tanh(self.parent_module.mil_attn_V(h_crops_reshape)))
attn_score = self.parent_module.mil_attn_w(attn_projection)
attn_score_reshape = attn_score.view(batch_size, num_crops)
attn = F.softmax(attn_score_reshape, dim=1)
z_weighted_avg = torch.sum((attn.unsqueeze((- 1)) * h_crops), 1)
y_crops = self.parent_module.classifier_linear(z_weighted_avg)
return (z_weighted_avg, attn, y_crops)
|
def _is_batch_set(obj: Any) -> bool:
if isinstance(obj, np.ndarray):
return ((obj.dtype == object) and all((isinstance(element, (dict, Batch)) for element in obj)))
elif isinstance(obj, (list, tuple)):
if ((len(obj) > 0) and all((isinstance(element, (dict, Batch)) for element in obj))):
return True
return False
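
# Sketch (assuming tianshou's `Batch` is in scope, as the function requires):
# only object arrays / sequences consisting entirely of dicts or Batches pass.
import numpy as np

assert _is_batch_set([{'a': 1}, {'b': 2}])
assert not _is_batch_set([{'a': 1}, 3])
assert not _is_batch_set(np.ones(3))  # numeric dtype, not object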
|
@oprepo.replaces('mpi4py.MPI.COMM_WORLD.Bcast')
@oprepo.replaces('dace.comm.Bcast')
def _bcast(pv: ProgramVisitor, sdfg: SDFG, state: SDFGState, buffer: str, root: Union[(str, sp.Expr, Number)]=0, grid: str=None, fcomm: str=None):
from dace.libraries.mpi.nodes.bcast import Bcast
libnode = Bcast('_Bcast_', grid, fcomm)
desc = sdfg.arrays[buffer]
in_buffer = state.add_read(buffer)
out_buffer = state.add_write(buffer)
if (isinstance(root, str) and (root in sdfg.arrays.keys())):
root_node = state.add_read(root)
else:
storage = desc.storage
root_name = _define_local_scalar(pv, sdfg, state, dace.int32, storage)
root_node = state.add_access(root_name)
root_tasklet = state.add_tasklet('_set_root_', {}, {'__out'}, '__out = {}'.format(root))
state.add_edge(root_tasklet, '__out', root_node, None, Memlet.simple(root_name, '0'))
state.add_edge(in_buffer, None, libnode, '_inbuffer', Memlet.from_array(buffer, desc))
state.add_edge(root_node, None, libnode, '_root', Memlet.simple(root_node.data, '0'))
state.add_edge(libnode, '_outbuffer', out_buffer, None, Memlet.from_array(buffer, desc))
return None
|
def _utt2spk_keydict(path):
utt2spk = {}
with open(path, 'r') as fi:
for line in fi:
(utt, spk) = line.strip().split()
utt2spk[utt] = spk
return utt2spk
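
# Sketch: parse a Kaldi-style utt2spk file ('<utt-id> <speaker-id>' per line).
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fo:
    fo.write('utt1 spkA\nutt2 spkB\n')
assert _utt2spk_keydict(fo.name) == {'utt1': 'spkA', 'utt2': 'spkB'}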
|
class FiniteDimensionalSemisimpleAlgebrasWithBasis(CategoryWithAxiom_over_base_ring):
_base_category_class_and_axiom = (SemisimpleAlgebras.FiniteDimensional, 'WithBasis')
class ParentMethods():
def radical_basis(self, **keywords):
return ()
        @cached_method
def central_orthogonal_idempotents(self):
return tuple([x.lift() for x in self.center().central_orthogonal_idempotents()])
class Commutative(CategoryWithAxiom_over_base_ring):
class ParentMethods():
            @cached_method
def _orthogonal_decomposition(self, generators=None):
if (self.dimension() == 1):
return self.basis().list()
category = Algebras(self.base_ring()).Semisimple().WithBasis().FiniteDimensional().Commutative().Subobjects()
if (generators is None):
generators = self.basis().list()
for gen in generators:
phi = self.module_morphism(on_basis=(lambda i: (gen * self.term(i))), codomain=self)
eigenspaces = phi.matrix().eigenspaces_right()
if (len(eigenspaces) >= 2):
subalgebras = [self.submodule(map(self.from_vector, eigenspace.basis()), category=category) for (eigenvalue, eigenspace) in eigenspaces]
return tuple([idempotent.lift() for subalgebra in subalgebras for idempotent in subalgebra._orthogonal_decomposition()])
raise Exception(('Unable to fully decompose %s!' % self))
            @cached_method
def central_orthogonal_idempotents(self):
return tuple([((e.leading_coefficient() / (e * e).leading_coefficient()) * e) for e in self._orthogonal_decomposition()])
|
def get_prediction(img_path, threshold):
img = Image.open(img_path)
transform = T.Compose([T.ToTensor()])
img = transform(img)
pred = model([img])
pred_score = list(pred[0]['scores'].detach().numpy())
pred_t = [pred_score.index(x) for x in pred_score if (x > threshold)][(- 1)]
masks = (pred[0]['masks'] > 0.5).squeeze().detach().cpu().numpy()
pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())]
masks = masks[:(pred_t + 1)]
pred_boxes = pred_boxes[:(pred_t + 1)]
pred_class = pred_class[:(pred_t + 1)]
return (masks, pred_boxes, pred_class)
|
class SimpleTaggerTest(ModelTestCase):
def setUp(self):
super(SimpleTaggerTest, self).setUp()
self.set_up_model('tests/fixtures/simple_tagger/experiment.json', 'tests/fixtures/data/sequence_tagging.tsv')
def test_simple_tagger_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def test_batch_predictions_are_consistent(self):
self.ensure_batch_predictions_are_consistent()
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
output_dict = self.model.decode(output_dict)
class_probs = output_dict['class_probabilities'][0].data.numpy()
numpy.testing.assert_almost_equal(numpy.sum(class_probs, (- 1)), numpy.array([1, 1, 1, 1]))
def test_mismatching_dimensions_throws_configuration_error(self):
params = Params.from_file(self.param_file)
params['model']['stacked_encoder']['input_size'] = 10
with pytest.raises(ConfigurationError):
Model.from_params(self.vocab, params.pop('model'))
def test_regularization(self):
penalty = self.model.get_regularization_penalty()
assert (penalty == 0)
iterator = BasicIterator(batch_size=32)
trainer = Trainer(self.model, None, iterator, self.dataset)
training_batch = next(iterator(self.dataset, num_epochs=1))
validation_batch = next(iterator(self.dataset, num_epochs=1))
training_loss = trainer._batch_loss(training_batch, for_training=True).data
validation_loss = trainer._batch_loss(validation_batch, for_training=False).data
assert (training_loss == validation_loss).all()
|
def test_multi_objective_empty_losses():
with pytest.raises(ValueError):
multi_cdv.get_descent_vector([], gradient)
|
def _make_imitator_inputs(trainer: transformers.Trainer, task_model: torch.nn.Module, inputs: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
(logits, _, _) = misc_utils.predict(trainer=trainer, model=task_model, inputs=inputs)
imitator_inputs = deepcopy(inputs)
imitator_inputs['labels'] = torch.tensor(logits.argmax(axis=1))
return imitator_inputs
|
def qqp_logits_sentence_encoding(s1_rep, s2_rep, afn, n_state, is_train, clf_dropout, highway=False):
out_rep = tf.concat([tf.abs((s1_rep - s2_rep)), (s1_rep * s2_rep)], (- 1))
act = act_name2fn(afn)
h = act(conv1d(out_rep, 'c_fc', n_state, 1, train=is_train))
if highway:
trans = conv1d(h, 'c_trans', n_state, 1, train=is_train)
gate = tf.nn.sigmoid(conv1d(h, 'c_gate', n_state, 1, train=is_train))
h = ((gate * trans) + ((1 - gate) * h))
h_dp = dropout(h, clf_dropout, is_train)
return conv1d(h_dp, 'c_logits', 2, 1, train=is_train)
|
class MMProbe(t.nn.Module):
def __init__(self, direction, covariance=None, inv=None, atol=0.001):
super().__init__()
self.direction = t.nn.Parameter(direction, requires_grad=False)
if (inv is None):
self.inv = t.nn.Parameter(t.linalg.pinv(covariance, hermitian=True, atol=atol), requires_grad=False)
else:
self.inv = t.nn.Parameter(inv, requires_grad=False)
def forward(self, x, iid=False):
if iid:
            return t.nn.Sigmoid()((x @ self.inv) @ self.direction)
else:
            return t.nn.Sigmoid()(x @ self.direction)
def pred(self, x, iid=False):
return self(x, iid=iid).round()
def from_data(acts, labels, atol=0.001, device='cpu'):
        acts, labels = acts.to(device), labels.to(device)  # assumed; this statement is garbled in the source
(pos_acts, neg_acts) = (acts[(labels == 1)], acts[(labels == 0)])
(pos_mean, neg_mean) = (pos_acts.mean(0), neg_acts.mean(0))
direction = (pos_mean - neg_mean)
centered_data = t.cat([(pos_acts - pos_mean), (neg_acts - neg_mean)], 0)
        covariance = ((centered_data.t() @ centered_data) / acts.shape[0])
probe = MMProbe(direction, covariance=covariance).to(device)
return probe
|
def disable_autodiff_subgraph_inlining(enabled=True):
torch._C._debug_set_autodiff_subgraph_inlining((not enabled))
try:
(yield)
finally:
torch._C._debug_set_autodiff_subgraph_inlining(True)
|
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar', only_best=False, logdir=''):
resfile = os.path.join(logdir, filename)
if is_best:
torch.save(state, resfile)
shutil.copyfile(resfile, os.path.join(logdir, 'model_temp_best.pth.tar'))
os.remove(resfile)
if only_best:
shutil.copyfile(os.path.join(logdir, 'model_temp_best.pth.tar'), os.path.join(logdir, 'model_best.pth.tar'))
os.remove(os.path.join(logdir, 'model_temp_best.pth.tar'))
|
def build_transforms_hist(cfg, is_train=True, PIXEL_MEAN=[0.485, 0.456, 0.406], PIXEL_STD=[0.229, 0.224, 0.225]):
    normalize_transform = T.Normalize(mean=PIXEL_MEAN, std=PIXEL_STD)  # note: defined but not included in the Compose below
transform = T.Compose([T.Resize([cfg.height, cfg.width]), T.ToTensor()])
return transform
|
def get_cmd(task, sub_task, model_tag, gpu, data_num, bs, lr, source_length, target_length, patience, epoch, warmup, model_dir, summary_dir, res_fn, max_steps=None, save_steps=None, log_steps=None):
if (max_steps is None):
cmd_str = ('bash exp_with_args.sh %s %s %s %d %d %d %d %d %d %d %d %d %s %s %s' % (task, sub_task, model_tag, gpu, data_num, bs, lr, source_length, target_length, patience, epoch, warmup, model_dir, summary_dir, res_fn))
else:
cmd_str = ('bash exp_with_args.sh %s %s %s %d %d %d %d %d %d %d %d %d %s %s %s %d %d %d' % (task, sub_task, model_tag, gpu, data_num, bs, lr, source_length, target_length, patience, epoch, warmup, model_dir, summary_dir, res_fn, max_steps, save_steps, log_steps))
return cmd_str
|
def test_hub_modelcardhelper(request, save_path):
model = prep_model()
hmch = HubModelCardHelper(license_info='cc-by-4.0', model_cls_name='SCVI', model_init_params=model.init_params_, model_setup_anndata_args=model.adata_manager._get_setup_method_args()['setup_args'], model_summary_stats=model.summary_stats, model_data_registry=model.adata_manager.data_registry, scvi_version='0.17.8', anndata_version='0.8.0', tissues=['eye'])
assert (hmch.license_info == 'cc-by-4.0')
assert (hmch.model_cls_name == 'SCVI')
assert (hmch.model_init_params == {'kwargs': {'model_kwargs': {}}, 'non_kwargs': {'n_hidden': 128, 'n_latent': 10, 'n_layers': 1, 'dropout_rate': 0.1, 'dispersion': 'gene', 'gene_likelihood': 'zinb', 'latent_distribution': 'normal'}})
assert (hmch.model_setup_anndata_args == {'layer': None, 'batch_key': None, 'labels_key': None, 'size_factor_key': None, 'categorical_covariate_keys': None, 'continuous_covariate_keys': None})
assert (dict(hmch.model_summary_stats) == {'n_batch': 1, 'n_cells': 400, 'n_extra_categorical_covs': 0, 'n_extra_continuous_covs': 0, 'n_labels': 1, 'n_vars': 100})
assert (hmch.model_data_registry.keys() == ['X', 'batch', 'labels'])
assert (dict(hmch.model_data_registry['X']) == {'attr_key': None, 'attr_name': 'X'})
assert (dict(hmch.model_data_registry['batch']) == {'attr_key': '_scvi_batch', 'attr_name': 'obs'})
assert (dict(hmch.model_data_registry['labels']) == {'attr_key': '_scvi_labels', 'attr_name': 'obs'})
assert (hmch.scvi_version == '0.17.8')
assert (hmch.anndata_version == '0.8.0')
assert (hmch.data_modalities == [])
assert (hmch.tissues == ['eye'])
assert (hmch.data_is_annotated is None)
assert (hmch.data_is_minified is None)
assert (hmch.training_data_url is None)
assert (hmch.training_code_url is None)
assert (hmch.model_parent_module == 'scvi.model')
assert (hmch.description == 'To be added...')
assert (hmch.references == 'To be added...')
assert (hmch.model_card.data.to_dict() == {'license': 'cc-by-4.0', 'library_name': 'scvi-tools', 'tags': ['biology', 'genomics', 'single-cell', 'model_cls_name:SCVI', 'scvi_version:0.17.8', 'anndata_version:0.8.0', 'tissue:eye']})
test_save_path = os.path.join(save_path, request.node.name)
model.save(test_save_path, overwrite=True, save_anndata=True)
hmch = HubModelCardHelper.from_dir(test_save_path, license_info='cc-by-4.0', anndata_version='0.8.0', model_parent_module='other_module')
assert (hmch.license_info == 'cc-by-4.0')
assert (hmch.model_cls_name == 'SCVI')
assert (hmch.model_init_params == model.init_params_)
assert (hmch.model_setup_anndata_args == model.adata_manager._get_setup_method_args()['setup_args'])
assert (hmch.model_summary_stats == dict(model.summary_stats))
assert (hmch.model_data_registry == dict(model.adata_manager.data_registry))
assert (hmch.scvi_version == scvi.__version__)
assert (hmch.anndata_version == '0.8.0')
assert (hmch.data_modalities == [])
assert (hmch.tissues == [])
assert (hmch.data_is_annotated is None)
assert (hmch.data_is_minified is False)
assert (hmch.training_data_url is None)
assert (hmch.training_code_url is None)
assert (hmch.model_parent_module == 'other_module')
assert (hmch.description == 'To be added...')
assert (hmch.references == 'To be added...')
assert (hmch.model_card.data.to_dict() == {'license': 'cc-by-4.0', 'library_name': 'scvi-tools', 'tags': ['biology', 'genomics', 'single-cell', 'model_cls_name:SCVI', f'scvi_version:{scvi.__version__}', 'anndata_version:0.8.0']})
|
def test_angular_neighbors():
vectors = [[0, 0, 1], [0, 0, 3], [1, 2, 3], [(- 1), (- 2), (- 3)]]
neighbors = angular_neighbors(vectors, 2)
true_neighbors = np.array([[1, 2], [0, 2], [0, 1], [0, 1]])
assert_equal(neighbors, true_neighbors)
|
def prelu_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, base_axis=1):
dy = grad_inputs[0]
x0 = inputs[0]
w0 = inputs[1]
base_axis += (x0.ndim * (base_axis < 0))
m0 = F.greater_scalar(x0, 0)
m1 = (1 - m0)
m0 = no_grad(m0)
m1 = no_grad(m1)
if (w0.shape == ()):
reshape = [1 for i in range(len(x0.shape))]
w0 = F.reshape(w0, reshape, inplace=False)
dw0 = F.sum(((dy * x0) * m1))
else:
reshape = [(w0.shape[0] if (i == base_axis) else 1) for i in range(len(x0.shape))]
w0 = F.reshape(w0, reshape, inplace=False)
raxes = [i for i in range(len(x0.shape)) if (i != base_axis)]
dw0 = F.sum(((dy * x0) * m1), raxes, keepdims=False)
dx0 = (dy * (m0 + (w0 * m1)))
return (dx0, dw0)
|
def _real_entropy_individual(traj):
time_series = tuple(map(tuple, traj[[constants.LATITUDE, constants.LONGITUDE]].values))
entropy = _true_entropy(time_series)
return entropy
|
def evaluate(model: Model, instances: Iterable[Instance], data_iterator: DataIterator, cuda_device: int, label_fname: str) -> Dict[(str, Any)]:
_warned_tqdm_ignores_underscores = False
check_for_gpu(cuda_device)
with torch.no_grad():
model.eval()
label_file = open(label_fname, 'w')
label_file.write('real_label,guessed_label\n')
iterator = data_iterator(instances, num_epochs=1, shuffle=False)
logger.info('Iterating over dataset')
generator_tqdm = Tqdm.tqdm(iterator, total=data_iterator.get_num_batches(instances))
total_num_inst = 0
for batch in generator_tqdm:
num_inst = batch['tokens']['tokens'].size(0)
total_num_inst += num_inst
batch = util.move_to_device(batch, cuda_device)
output_dict = model(**batch)
if (cuda_device == (- 1)):
output_matrix = output_dict['label_logits'].data.numpy()
else:
output_matrix = output_dict['label_logits'].data.cpu().numpy()
output_labels = np.argmax(output_matrix, axis=1)
if (cuda_device == (- 1)):
true_labels = batch['label'].data.numpy()
else:
true_labels = batch['label'].data.cpu().numpy()
assert (true_labels.shape[0] == output_labels.shape[0])
for i in range(true_labels.shape[0]):
label_file.write((str(int(true_labels[i])) + ','))
label_file.write((str(int(output_labels[i])) + '\n'))
metrics = model.get_metrics()
if ((not _warned_tqdm_ignores_underscores) and any((metric_name.startswith('_') for metric_name in metrics))):
logger.warning('Metrics with names beginning with "_" will not be logged to the tqdm progress bar.')
_warned_tqdm_ignores_underscores = True
description = (', '.join([('%s: %.2f' % (name, value)) for (name, value) in metrics.items() if (not name.startswith('_'))]) + ' ||')
generator_tqdm.set_description(description, refresh=False)
print(('NUM INSTANCES ITERATED OVER: ' + str(total_num_inst)))
label_file.close()
return model.get_metrics(reset=True)
|
class Conv2d(_ConvNd):
_FLOAT_MODULE = nn.Conv2d
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, False, _pair(0), groups, bias, padding_mode)
def _get_name(self):
return 'QuantizedConv2d'
def set_weight_bias(self, w, b):
self._packed_params = torch.ops.quantized.conv2d_prepack(w, b, self.stride, self.padding, self.dilation, self.groups)
def _weight_bias(self):
return self._packed_params.unpack()
def weight(self):
return self._weight_bias()[0]
def bias(self):
return self._weight_bias()[1]
def forward(self, input):
if (len(input.shape) != 4):
raise ValueError('Input shape must be `(N, C, H, W)`!')
return ops.quantized.conv2d(input, self._packed_params, self.scale, self.zero_point)
    @classmethod
    def from_float(cls, mod):
if hasattr(mod, 'weight_fake_quant'):
if (type(mod) == nniqat.ConvBn2d):
(mod.weight, mod.bias) = fuse_conv_bn_weights(mod.weight, mod.bias, mod.bn.running_mean, mod.bn.running_var, mod.bn.eps, mod.bn.weight, mod.bn.bias)
assert hasattr(mod, 'activation_post_process'), 'Input QAT module must have observer attached'
weight_post_process = mod.weight_fake_quant
activation_post_process = mod.activation_post_process
else:
assert (type(mod) == cls._FLOAT_MODULE), (((' nnq.' + cls.__name__) + '.from_float only works for ') + cls._FLOAT_MODULE.__name__)
assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined.'
if (type(mod) == nni.ConvReLU2d):
activation_post_process = mod[1].activation_post_process
mod = mod[0]
else:
activation_post_process = mod.activation_post_process
weight_post_process = mod.qconfig.weight()
return cls.get_qconv(mod, activation_post_process, weight_post_process)
|
class HMGNN(nn.Module):
def __init__(self, num_convs, dg_node_type_universe, lg_node_type_universe, dg_num_interaction_residuals, lg_num_interaction_residuals, dg_num_residuals, lg_num_residuals, rbf_dim, cut_r, dg_mean, lg_mean, dg_std, lg_std, hidden_dim, activation, feat_drop):
super(HMGNN, self).__init__()
self.num_convs = num_convs
self.activation = activation
self.dg_input_module = DistGraphInputModule(dg_node_type_universe, rbf_dim, hidden_dim, cut_r, activation)
self.lg_input_module = LineGraphInputModule(lg_node_type_universe, rbf_dim, rbf_dim, hidden_dim, cut_r, activation)
self.dg_interaction_layer = nn.ModuleList()
self.lg_interaction_layer = nn.ModuleList()
for _ in range(num_convs):
self.dg_interaction_layer.append(HoConv('atom', hidden_dim, dg_num_interaction_residuals, dg_num_residuals, activation, feat_drop))
self.lg_interaction_layer.append(HoConv('bond', hidden_dim, lg_num_interaction_residuals, lg_num_residuals, activation, feat_drop))
self.dg_output_module = OutputModule('atom', dg_node_type_universe, hidden_dim, activation, dg_mean, dg_std)
self.lg_output_module = OutputModule('bond', lg_node_type_universe, hidden_dim, activation, lg_mean, lg_std)
self.fussion_layer = FussionModule(2, hidden_dim, activation)
def forward(self, batch_hg, dg_node_feat_discrete, lg_node_feat_continuous, lg_node_feat_discrete, dg_edge_feat, lg_edge_feat):
(dg_h, dg_eh) = self.dg_input_module(dg_node_feat_discrete, dg_edge_feat)
(lg_h, lg_eh) = self.lg_input_module(lg_node_feat_continuous, lg_node_feat_discrete, lg_edge_feat)
for i in range(self.num_convs):
dg_h_new = self.dg_interaction_layer[i](batch_hg, dg_h, dg_eh, lg_h)
lg_h = self.lg_interaction_layer[i](batch_hg, lg_h, lg_eh, dg_h)
dg_h = dg_h_new
(dg_graph_feat, dg_node_pred) = self.dg_output_module(batch_hg, dg_h, dg_node_feat_discrete)
(lg_graph_feat, lg_node_pred) = self.lg_output_module(batch_hg, lg_h, lg_node_feat_discrete)
graph_feat = torch.cat([dg_graph_feat, lg_graph_feat], dim=1)
score = torch.cat([dg_node_pred, lg_node_pred], dim=1)
(pred, attn_score) = self.fussion_layer(graph_feat, score)
return (dg_node_pred, lg_node_pred, pred, attn_score)
|
class PatchInferencer():
def __init__(self, model_weight_file, output_patch_mask):
self.output_patch_mask = output_patch_mask
sys.path.append(model_weight_file)
from pznet.pznet import PZNet
self.net = PZNet(model_weight_file)
def compute_device(self):
return platform.processor()
def __call__(self, input_patch):
output_patch = self.net.forward(input_patch)
output_patch *= self.output_patch_mask
return output_patch
|
def romanian_preprocessing(text):
    # Normalize cedilla forms to comma-below variants, then strip diacritics.
    text = text.replace('Ş', 'Ș').replace('ş', 'ș')
    text = text.replace('Ţ', 'Ț').replace('ţ', 'ț')
    text = text.replace('Ș', 'S').replace('ș', 's')
    text = text.replace('Ț', 'T').replace('ț', 't')
    text = text.replace('Ă', 'A').replace('ă', 'a')
    text = text.replace('Â', 'A').replace('â', 'a')
    text = text.replace('Î', 'I').replace('î', 'i')
    return text
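
# Sketch: cedilla variants are normalized first, then diacritics are stripped.
assert romanian_preprocessing('Ştiinţă română') == 'Stiinta romana'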
|
class TestRegression(object):
def test_masked_array_create(self):
x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6], mask=[0, 0, 0, 1, 1, 1, 0, 0])
assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])
def test_masked_array(self):
np.ma.array(1, mask=[1])
def test_mem_masked_where(self):
from numpy.ma import masked_where, MaskType
a = np.zeros((1, 1))
b = np.zeros(a.shape, MaskType)
c = masked_where(b, a)
(a - c)
def test_masked_array_multiply(self):
a = np.ma.zeros((4, 1))
a[(2, 0)] = np.ma.masked
b = np.zeros((4, 2))
(a * b)
(b * a)
def test_masked_array_repeat(self):
np.ma.array([1], mask=False).repeat(10)
def test_masked_array_repr_unicode(self):
repr(np.ma.array(u'Unicode'))
def test_atleast_2d(self):
a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
b = np.atleast_2d(a)
assert_((a.mask.ndim == 1))
assert_((b.mask.ndim == 2))
def test_set_fill_value_unicode_py3(self):
a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
a.fill_value = 'X'
assert_((a.fill_value == 'X'))
def test_var_sets_maskedarray_scalar(self):
a = np.ma.array(np.arange(5), mask=True)
mout = np.ma.array((- 1), dtype=float)
a.var(out=mout)
assert_((mout._data == 0))
def test_ddof_corrcoef(self):
x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
y = np.array([2, 2.5, 3.1, 3, 5])
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, 'bias and ddof have no effect')
r0 = np.ma.corrcoef(x, y, ddof=0)
r1 = np.ma.corrcoef(x, y, ddof=1)
assert_allclose(r0.data, r1.data)
def test_mask_not_backmangled(self):
a = np.ma.MaskedArray([1.0, 2.0], mask=[False, False])
assert_((a.mask.shape == (2,)))
b = np.tile(a, (2, 1))
assert_((a.mask.shape == (2,)))
assert_((b.shape == (2, 2)))
assert_((b.mask.shape == (2, 2)))
def test_empty_list_on_structured(self):
ma = np.ma.MaskedArray([(1, 1.0), (2, 2.0), (3, 3.0)], dtype='i4,f4')
assert_array_equal(ma[[]], ma[:0])
def test_masked_array_tostring_fortran(self):
ma = np.ma.arange(4).reshape((2, 2))
assert_array_equal(ma.tostring(order='F'), ma.T.tostring())
|
def test_from_pandas_contextual_severity():
anomalies = pd.DataFrame({'start': [2, 8], 'end': [5, 9], 'severity': [0.1, 0.2]})
expected_return = [(2, 5, 0.1), (8, 9, 0.2)]
returned = from_pandas_contextual(anomalies)
assert_list_tuples(returned, expected_return)
|
def test_big():
a = ak.highlevel.ArrayBuilder(initial=90)
for i in range(2000):
if (i == 200):
tmp = a.snapshot()
a.boolean(((i % 2) == 0))
assert (to_list(a) == ([True, False] * 1000))
assert (to_list(tmp) == ([True, False] * 100))
|
@pytest.mark.parametrize('basis, quad', ((list(product(ctrialBasis, cquads)) + list(product(ltrialBasis, lquads))) + list(product(latrialBasis, lagquads))))
def test_div2(basis, quad):
B = basis(10, quad=quad)
u = shenfun.TrialFunction(B)
v = shenfun.TestFunction(B)
m = inner(u, v)
z = Function(B, val=1)
c = (m / z)
c2 = Function(B)
c2 = m.solve(z, c2)
assert np.allclose(c2[B.slice()], c[B.slice()])
|
def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, log_writer=None, args=None):
model.train(True)
metric_logger = misc.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 20
accum_iter = args.accum_iter
optimizer.zero_grad()
if (log_writer is not None):
print('log_dir: {}'.format(log_writer.log_dir))
for (data_iter_step, (samples, _)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
if ((data_iter_step % accum_iter) == 0):
lr_sched.adjust_learning_rate(optimizer, ((data_iter_step / len(data_loader)) + epoch), args)
samples = samples.to(device, non_blocking=True)
with torch.cuda.amp.autocast():
(loss, _, _) = model(samples, mask_ratio=args.mask_ratio)
loss_value = loss.item()
loss /= accum_iter
loss_scaler(loss, optimizer, parameters=model.parameters(), update_grad=(((data_iter_step + 1) % accum_iter) == 0))
if (((data_iter_step + 1) % accum_iter) == 0):
optimizer.zero_grad()
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
lr = optimizer.param_groups[0]['lr']
metric_logger.update(lr=lr)
loss_value_reduce = misc.all_reduce_mean(loss_value)
if (not math.isfinite(loss_value_reduce)):
print('Loss is {}, stopping training'.format(loss_value_reduce))
sys.exit(1)
if ((log_writer is not None) and (((data_iter_step + 1) % accum_iter) == 0)):
epoch_1000x = int((((data_iter_step / len(data_loader)) + epoch) * 1000))
log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
log_writer.add_scalar('lr', lr, epoch_1000x)
metric_logger.synchronize_between_processes()
print('Averaged stats:', metric_logger)
return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
class PreActResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear((512 * block.expansion), num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = (planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), (- 1))
out = self.linear(out)
return out
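
# Hypothetical constructor call: a PreActResNet-18 takes a residual block
# class (the usual `PreActBlock`, not defined in this snippet) plus the
# per-stage block counts, e.g. for CIFAR-10:
#   net = PreActResNet(PreActBlock, [2, 2, 2, 2], num_classes=10)
#   out = net(torch.randn(1, 3, 32, 32))  # -> shape (1, 10)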
|
class SensitivityExplanation(ExplanationBase):
def __init__(self):
super().__init__()
self.explanations = defaultdict(dict)
def add(self, feature_name, mu, mu_star, sigma, mu_star_conf):
self.explanations[feature_name] = {'mu': mu, 'mu_star': mu_star, 'sigma': sigma, 'mu_star_conf': mu_star_conf}
def get_explanations(self):
return self.explanations
def plot(self, **kwargs):
import matplotlib.pyplot as plt
features = list(self.explanations.keys())
results = {'mu': [self.explanations[f]['mu'] for f in features], 'mu_star': [self.explanations[f]['mu_star'] for f in features], 'sigma': [self.explanations[f]['sigma'] for f in features], 'mu_star_conf': [self.explanations[f]['mu_star_conf'] for f in features]}
(fig, axes) = plt.subplots(2, 2, squeeze=False)
for (i, name) in enumerate(['mu', 'mu_star', 'sigma', 'mu_star_conf']):
plt.sca(axes[((i // 2), (i % 2))])
plt.barh([self._s(f, max_len=10) for f in features], results[name])
plt.ylabel(name)
plt.xlabel('Sensitivity')
plt.grid()
return fig
def _plotly_figure(self, **kwargs):
import plotly.express as px
from plotly.subplots import make_subplots
features = list(self.explanations.keys())
results = {'mu': [self.explanations[f]['mu'] for f in features], 'mu_star': [self.explanations[f]['mu_star'] for f in features], 'sigma': [self.explanations[f]['sigma'] for f in features], 'mu_star_conf': [self.explanations[f]['mu_star_conf'] for f in features]}
fig = make_subplots(rows=2, cols=2, subplot_titles=['mu', 'mu_star', 'sigma', 'mu_star_conf'])
for (i, name) in enumerate(['mu', 'mu_star', 'sigma', 'mu_star_conf']):
(r, c) = ((i // 2), (i % 2))
_fig = px.bar(y=[self._s(f, max_len=10) for f in features], x=results[name], orientation='h', labels={'x': 'Sensitivity', 'y': name})
fig.add_trace(_fig.data[0], row=(r + 1), col=(c + 1))
return fig
def plotly_plot(self, **kwargs):
return DashFigure(self._plotly_figure(**kwargs))
def ipython_plot(self, **kwargs):
import plotly
plotly.offline.iplot(self._plotly_figure(**kwargs))
    @classmethod
    def from_dict(cls, d):
exp = SensitivityExplanation()
exp.explanations = d['explanations']
return exp
|
def SwitchNot(name, *conditions):
conditions = _MakeList(conditions)
return core.scoped_execution_step(_get_next_step_name('SwitchNot', name), [_RunOnceIfNot((name + '/SwitchNot'), cond, step) for (cond, step) in conditions])
|
def get_weight_norm(model):
return torch.norm(torch.stack([torch.norm(p[1].detach()) for p in model.named_parameters() if ('weight' in p[0])]))
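
# Sketch: a single scalar norm over every parameter whose name contains
# 'weight' (biases are excluded).
import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
print(get_weight_norm(model))  # 0-dim tensor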
|
class _IntegerLessThan(Constraint):
def __init__(self, upper_bound):
self.upper_bound = upper_bound
def check(self, value):
return (((value % 1) == 0) & (value <= self.upper_bound))
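
# Sketch (assuming torch.distributions' Constraint base used above): the check
# passes only for integer-valued inputs at or below the bound.
import torch

c = _IntegerLessThan(upper_bound=5)
assert c.check(torch.tensor(3.0))      # integral and within the bound
assert not c.check(torch.tensor(3.5))  # non-integral
assert not c.check(torch.tensor(7.0))  # above the bound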
|
def push_to_influx(metric_name: str, value: int, labels: dict) -> bool:
return batch_push_to_influx([(metric_name, value, labels)])
|
class AdjacentTempDirectory(TempDirectory):
LEADING_CHARS = '-~.=%'
def __init__(self, original, delete=None):
self.original = original.rstrip('/\\')
super(AdjacentTempDirectory, self).__init__(delete=delete)
    @classmethod
    def _generate_names(cls, name):
for i in range(1, len(name)):
for candidate in itertools.combinations_with_replacement(cls.LEADING_CHARS, (i - 1)):
new_name = (('~' + ''.join(candidate)) + name[i:])
if (new_name != name):
(yield new_name)
for i in range(len(cls.LEADING_CHARS)):
for candidate in itertools.combinations_with_replacement(cls.LEADING_CHARS, i):
new_name = (('~' + ''.join(candidate)) + name)
if (new_name != name):
(yield new_name)
def _create(self, kind):
(root, name) = os.path.split(self.original)
for candidate in self._generate_names(name):
path = os.path.join(root, candidate)
try:
os.mkdir(path)
except OSError as ex:
if (ex.errno != errno.EEXIST):
raise
else:
path = os.path.realpath(path)
break
else:
path = os.path.realpath(tempfile.mkdtemp(prefix='pip-{}-'.format(kind)))
logger.debug('Created temporary directory: %s', path)
return path
|
class CapFiltCaptionDataset(BaseDataset, __DisplMixin):
def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
super().__init__(vis_processor, text_processor, vis_root, ann_paths)
self.img_ids = {}
n = 0
for ann in self.annotation:
ann['image_id'] = ''.join(ann['image'].split('.')[:(- 1)])
img_id = ann['image_id']
if (img_id not in self.img_ids.keys()):
self.img_ids[img_id] = n
n += 1
def __getitem__(self, index):
ann = self.annotation[index]
image_path = os.path.join(ann['image'])
try:
image = Image.open(image_path).convert('RGB')
        except Exception:
return None
image = self.vis_processor(image)
caption = self.text_processor(ann['caption'])
return {'image': image, 'text_input': caption, 'image_id': ann['image_id']}
|
class TestDatasetFromList(unittest.TestCase):
    @unittest.skipIf((sys.version_info.minor <= 6), 'Not supported in Python 3.6')
def test_using_lazy_path(self):
dataset = []
for i in range(10):
dataset.append({'file_name': LazyPath(partial(_a_slow_func, i))})
dataset = DatasetFromList(dataset)
for i in range(10):
path = dataset[i]['file_name']
self.assertTrue(isinstance(path, LazyPath))
self.assertEqual(os.fspath(path), _a_slow_func(i))
|
def get_git_commit_hash():
import subprocess
p = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE)
(git_commit, _) = p.communicate()
git_commit = git_commit.strip().decode('utf-8')
return git_commit
|
class BertTokenizer(object):
def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True, never_split=('[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]')):
if (not os.path.isfile(vocab_file)):
raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
        self.max_len = (max_len if (max_len is not None) else int(1e12))
def tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_ids(self, tokens):
ids = []
for token in tokens:
ids.append(self.vocab[token])
if (len(ids) > self.max_len):
logger.warning('Token indices sequence length is longer than the specified maximum sequence length for this BERT model ({} > {}). Running this sequence through BERT will result in indexing errors'.format(len(ids), self.max_len))
return ids
def convert_ids_to_tokens(self, ids):
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
def save_vocabulary(self, vocab_path):
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_NAME)
with open(vocab_file, 'w', encoding='utf-8') as writer:
for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!'.format(vocab_file))
index = token_index
writer.write((token + u'\n'))
index += 1
return vocab_file
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
if (pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP):
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
if (('-cased' in pretrained_model_name_or_path) and kwargs.get('do_lower_case', True)):
logger.warning('The pre-trained model you are loading is a cased model but you have not set `do_lower_case` to False. We are setting `do_lower_case=False` for you but you may want to check this behavior.')
kwargs['do_lower_case'] = False
elif (('-cased' not in pretrained_model_name_or_path) and (not kwargs.get('do_lower_case', True))):
logger.warning('The pre-trained model you are loading is an uncased model but you have set `do_lower_case` to False. We are setting `do_lower_case=True` for you but you may want to check this behavior.')
kwargs['do_lower_case'] = True
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except EnvironmentError:
if (pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP):
logger.error("Couldn't reach server at '{}' to download vocabulary.".format(vocab_file))
else:
logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file))
return None
if (resolved_vocab_file == vocab_file):
logger.info('loading vocabulary file {}'.format(vocab_file))
else:
logger.info('loading vocabulary file {} from cache at {}'.format(vocab_file, resolved_vocab_file))
if (pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP):
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
|
class DoxyCompMem(Base):
kind = None
def __init__(self, *args, **kwargs):
super(DoxyCompMem, self).__init__(*args, **kwargs)
    @classmethod
    def can_parse(cls, obj):
return (obj.kind == cls.kind)
def set_descriptions(self, parse_data):
bd = description(getattr(parse_data, 'briefdescription', None))
dd = description(getattr(parse_data, 'detaileddescription', None))
self._data['brief_description'] = bd
self._data['detailed_description'] = dd
def set_parameters(self, data):
vs = [ddc.value for ddc in data.detaileddescription.content_]
pls = []
for v in vs:
if hasattr(v, 'parameterlist'):
pls += v.parameterlist
pis = []
for pl in pls:
pis += pl.parameteritem
dpis = []
for pi in pis:
dpi = DoxyParameterItem(pi)
dpi._parse()
dpis.append(dpi)
self._data['params'] = dpis
|
def gen_classifier_loader(name, d):
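    # Return a zero-argument loader that builds the architecture named in
    # d['arch'], restores its saved weights, and wraps it in a `Smooth`
    # randomized-smoothing classifier configured from the remaining entries.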
def classifier_loader():
model = torch_models.__dict__[d['arch']]()
load_model_state_dict(model, name)
model = Smooth(model, d['noise_sigma'], d['n'], d['alpha'], d['mean'], d['std'])
return model
return classifier_loader
|
def test_validate_series(df_broken_email: pd.DataFrame) -> None:
df_valid = validate_email(df_broken_email['messy_email'])
df_check = pd.Series([True, True, False, True, False, False, False, False], name='messy_lat_long')
assert df_check.equals(df_valid)
|
@NECKS.register_module
class NonLinearNeckV0(nn.Module):
def __init__(self, in_channels, hid_channels, out_channels, sync_bn=False, with_avg_pool=True):
super(NonLinearNeckV0, self).__init__()
self.with_avg_pool = with_avg_pool
if with_avg_pool:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if (version.parse(torch.__version__) < version.parse('1.4.0')):
self.expand_for_syncbn = True
else:
self.expand_for_syncbn = False
self.fc0 = nn.Linear(in_channels, hid_channels)
if sync_bn:
(_, self.bn0) = build_norm_layer(dict(type='SyncBN', momentum=0.001, affine=False), hid_channels)
else:
self.bn0 = nn.BatchNorm1d(hid_channels, momentum=0.001, affine=False)
self.fc1 = nn.Linear(hid_channels, out_channels)
self.relu = nn.ReLU(inplace=True)
self.drop = nn.Dropout()
self.sync_bn = sync_bn
def init_weights(self, init_linear='normal'):
_init_weights(self, init_linear)
def _forward_syncbn(self, module, x):
assert (x.dim() == 2)
if self.expand_for_syncbn:
x = module(x.unsqueeze((- 1)).unsqueeze((- 1))).squeeze((- 1)).squeeze((- 1))
else:
x = module(x)
return x
def forward(self, x):
assert (len(x) == 1)
x = x[0]
if self.with_avg_pool:
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc0(x)
if self.sync_bn:
x = self._forward_syncbn(self.bn0, x)
else:
x = self.bn0(x)
x = self.relu(x)
x = self.drop(x)
x = self.fc1(x)
x = self.relu(x)
return [x]
|
def tf_test_error_rate(logits, x, X_test, y_test):
assert (len(X_test) == len(y_test))
eval_prediction = K.softmax(logits)
predictions = batch_eval([x], [eval_prediction], [X_test])[0]
return error_rate(predictions, y_test)
|
class Credentials(ABC, LoggingBase):
    def __init__(self):
        super().__init__()
    @staticmethod
    @abstractmethod
    def deserialize(config: dict, cache: Cache, handlers: LoggingHandlers) -> 'Credentials':
        '\n Create credentials instance from user config and cached values.\n '
        pass
    @abstractmethod
    def serialize(self) -> dict:
        '\n Serialize to JSON for storage in cache.\n '
        pass
|
def _process_group_construct_rpc_backend_options_handler(rpc_timeout, init_method, num_send_recv_threads=rpc_constants.DEFAULT_NUM_SEND_RECV_THREADS, **kwargs):
from . import ProcessGroupRpcBackendOptions
return ProcessGroupRpcBackendOptions(rpc_timeout=rpc_timeout, init_method=init_method, num_send_recv_threads=num_send_recv_threads)
|
def test_BBPSSWMessage():
msg = BBPSSWMessage(BBPSSWMsgType.PURIFICATION_RES, 'another', meas_res=0)
assert (msg.msg_type == BBPSSWMsgType.PURIFICATION_RES)
assert (msg.receiver == 'another')
assert (msg.meas_res == 0)
with pytest.raises(Exception):
BBPSSWMessage('unknown type')
|
class ValidationMonitor(object):
def __init__(self, writer):
self._writer = writer
def add(self, i, val_results):
all_test_metric = val_results[0]
val_loss = val_results[1]
self._writer.add_scalar('Metrics/1_ER-LD', all_test_metric[0], i)
self._writer.add_scalar('Metrics/2_F-LD', all_test_metric[1], i)
self._writer.add_scalar('Metrics/3_LE-CD', all_test_metric[2], i)
self._writer.add_scalar('Metrics/4_LR-CD', all_test_metric[3], i)
self._writer.add_scalar('Metrics/0_SELD-error', all_test_metric[4], i)
self._writer.add_scalar('Loss/val', val_loss, i)
|
class ONMTDatasetBase(torchtext.data.Dataset):
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def __reduce_ex__(self, proto):
        return super(ONMTDatasetBase, self).__reduce_ex__(proto)
def load_fields(self, vocab_dict):
from onmt.io.IO import load_fields_from_vocab
fields = load_fields_from_vocab(vocab_dict.items(), self.data_type)
self.fields = dict([(k, f) for (k, f) in fields.items() if (k in self.examples[0].__dict__)])
    @staticmethod
    def extract_text_features(tokens):
if (not tokens):
return ([], [], (- 1))
        split_tokens = [token.split(u'￨') for token in tokens]  # '￨' separates a word from its features
split_tokens = [token for token in split_tokens if token[0]]
token_size = len(split_tokens[0])
assert all(((len(token) == token_size) for token in split_tokens)), 'all words must have the same number of features'
words_and_features = list(zip(*split_tokens))
words = words_and_features[0]
features = words_and_features[1:]
return (words, features, (token_size - 1))
def _join_dicts(self, *args):
return dict(chain(*[d.items() for d in args]))
def _peek(self, seq):
first = next(seq)
return (first, chain([first], seq))
def _construct_example_fromlist(self, data, fields):
ex = torchtext.data.Example()
for ((name, field), val) in zip(fields, data):
if (field is not None):
setattr(ex, name, field.preprocess(val))
else:
setattr(ex, name, val)
return ex
|
def knn(m_xx, m_xy, m_yy, k, sqrt=False):
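    # Leave-one-out k-NN two-sample classification: points from set X get
    # label 1 and points from set Y label 0; each point is classified by
    # majority vote over its k nearest neighbours (the +inf diagonal
    # excludes self-matches), and precision/recall/accuracy are reported.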
n0 = m_xx.size(0)
n1 = m_yy.size(0)
label = torch.cat((torch.ones(n0), torch.zeros(n1))).to(m_xx)
mat = torch.cat((torch.cat((m_xx, m_xy), 1), torch.cat((m_xy.transpose(0, 1), m_yy), 1)), 0)
if sqrt:
mat = mat.abs().sqrt()
(val, idx) = (mat + torch.diag((float('inf') * torch.ones((n0 + n1)).to(m_xx)))).topk(k, 0, False)
count = torch.zeros((n0 + n1)).to(m_xx)
for i in range(0, k):
count = (count + label.index_select(0, idx[i]))
pred = torch.ge(count, ((float(k) / 2) * torch.ones((n0 + n1)).to(m_xx))).float()
s = {'tp': (pred * label).sum(), 'fp': (pred * (1 - label)).sum(), 'fn': ((1 - pred) * label).sum(), 'tn': ((1 - pred) * (1 - label)).sum()}
s.update({'precision': (s['tp'] / ((s['tp'] + s['fp']) + 1e-10)), 'recall': (s['tp'] / ((s['tp'] + s['fn']) + 1e-10)), 'acc_t': (s['tp'] / ((s['tp'] + s['fn']) + 1e-10)), 'acc_f': (s['tn'] / ((s['tn'] + s['fp']) + 1e-10)), 'acc': torch.eq(label, pred).float().mean()})
return s
|
@pytest.fixture(scope='module')
def dataframe_only_item_none_pandas():
data_only_item_none = [(1, [2, 0, 0, 0, 0, 0], [19842]), (1, [2, 4, 0, 0, 0, 0], [19842, 19844]), (1, [2, 4, 3, 0, 0, 0], [19842, 19844, 19843]), (1, [2, 4, 3, 5, 0, 0], [19842, 19844, 19843, 19845]), (1, [2, 4, 3, 5, 6, 0], [19842, 19844, 19843, 19845, 19846]), (1, [2, 4, 3, 5, 6, 7], [19842, 19844, 19843, 19845, 19846, 19847]), (2, [1, 0, 0, 0, 0, 0], [19841]), (2, [1, 2, 0, 0, 0, 0], [19841, 19842]), (2, [1, 2, 3, 0, 0, 0], [19841, 19842, 19843]), (2, [1, 2, 3, 4, 0, 0], [19841, 19842, 19843, 19844]), (3, [10, 0, 0, 0, 0, 0], [19844]), (4, [10, 11, 0, 0, 0, 0], [19844, 19843]), (4, [10, 11, 12, 0, 0, 0], [19844, 19843, 19845]), (10, [1, 0, 0, 0, 0, 0], [19841])]
return pd.DataFrame(data_only_item_none, columns=['user_id', 'item_id', 'timestamp'])
|
class MultiPrototypes(nn.Module):
def __init__(self, output_dim, nmb_prototypes):
super(MultiPrototypes, self).__init__()
self.nmb_heads = len(nmb_prototypes)
for (i, k) in enumerate(nmb_prototypes):
self.add_module(('prototypes' + str(i)), nn.Linear(output_dim, k, bias=False))
def forward(self, x):
out = []
for i in range(self.nmb_heads):
out.append(getattr(self, ('prototypes' + str(i)))(x))
return out
|
def load_questions(filename='questions.csv'):
questions = pd.read_csv(filename)
questions.dropna(axis=1, how='all', inplace=True)
return questions
|
def validate_pathname_binary_tuple(data):
if (not isinstance(data, tuple)):
raise TypeError('pathname binary data should be tuple type, but got {}'.format(type(data)))
if (len(data) != 2):
raise TypeError('pathname binary tuple length should be 2, but got {}'.format(str(len(data))))
if (not isinstance(data[0], str)):
raise TypeError('pathname binary tuple should have string type pathname, but got {}'.format(type(data[0])))
if (not isinstance(data[1], BufferedIOBase)):
raise TypeError('pathname binary tuple should have BufferedIOBase based binary type, but got {}'.format(type(data[1])))
|
def _get_string_replacement(tok: Token) -> List[Token]:
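    # For a quoted string-literal token, generate mutated variants: the empty
    # literal, every contiguous inner span kept or deleted, and versions with
    # '%' wildcards added after the opening and before the closing quote.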
result = []
if ((tok.ttype == tokens.Token.Literal.String.Symbol) or (tok.ttype == tokens.Token.Literal.String.Single)):
v = tok.value
result.append((v[0] + v[(- 1)]))
(start, end) = (1, (len(v) - 1))
for span_start in range(start, end):
for span_end in range((span_start + 1), end):
v_new = ((v[0] + v[span_start:span_end]) + v[(- 1)])
result.append(v_new)
v_new = (v[:span_start] + v[span_end:])
result.append(v_new)
v = v.replace('%', '')
for add_percent_last in ('%', ''):
for add_percent_first in ('%', ''):
result.append(((((v[0] + add_percent_first) + v[1:(- 1)]) + add_percent_last) + v[(- 1)]))
result = [Token(tok.ttype, v_new) for v_new in set(result) if (v_new != v)]
return result
|
class InstanceNorm1d(torch.nn.InstanceNorm1d):
def __init__(self, num_features, weight, bias, scale, zero_point, eps=1e-05, momentum=0.1, affine=False, track_running_stats=False):
super(InstanceNorm1d, self).__init__(num_features, eps, momentum, affine, track_running_stats)
self.weight = weight
self.bias = bias
self.scale = scale
self.zero_point = zero_point
def forward(self, input):
return torch.ops.quantized.instance_norm(input, self.weight, self.bias, self.eps, self.scale, self.zero_point)
def _get_name(self):
return 'QuantizedInstanceNorm1d'
    @classmethod
    def from_float(cls, mod):
activation_post_process = mod.activation_post_process
(scale, zero_point) = mod.activation_post_process.calculate_qparams()
new_mod = cls(mod.num_features, mod.weight, mod.bias, float(scale), int(zero_point), mod.eps, mod.affine)
return new_mod
|
def compare(fitness_1: float, fitness_2: float) -> int:
if (fitness_1 < fitness_2):
return (- 1)
if (fitness_1 > fitness_2):
return 1
return 0
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--wordvec_pretrain_file', type=str, default=None, help='Exact name of the pretrain file to read')
parser.add_argument('--charlm', default='default', type=str, help='Which charlm to run on. Will use the default charlm for this language/model if not set. Set to None to turn off charlm for languages with a default charlm')
parser.add_argument('--no_charlm', dest='charlm', action='store_const', const=None, help="Don't use a charlm, even if one is used by default for this package")
parser.add_argument('--load_dir', type=str, default='saved_models/constituency', help='Root dir for getting the models to resave.')
parser.add_argument('--save_dir', type=str, default='resaved_models/constituency', help='Root dir for resaving the models.')
parser.add_argument('treebanks', type=str, nargs='+', help='Which treebanks to run on. Use all_ud or ud_all for all UD treebanks')
args = parser.parse_args()
return args
|
def makedir(dir_path):
is_success = False
try:
if (not g_pathmgr.exists(dir_path)):
g_pathmgr.mkdirs(dir_path)
is_success = True
except BaseException:
print(f'Error creating directory: {dir_path}')
return is_success
|
class AccumulateMeter(object):
def __init__(self, greater_is_better=True, print_precision=4):
self.greater_is_better = greater_is_better
self.print_precision = print_precision
self.reset()
def reset(self):
self.avg = 0.0
self.val = 0.0
self.count = 0
def update(self, val, n=1):
self.val = val
self.avg = (((self.avg * self.count) + (val * n)) / (self.count + n))
self.count += n
def __add__(self, other):
if (other.count > 0):
self.update(other.avg, other.count)
return self
def avg_better_than(self, other):
if self.greater_is_better:
return (self.avg > other.avg)
else:
return (self.avg < other.avg)
def avg_better_than_float(self, afloat):
if self.greater_is_better:
return (self.avg > afloat)
else:
return (self.avg < afloat)
def __repr__(self):
return f'{self.avg:.{self.print_precision}f}'
|
class GranularizePipe(Pipe):
def __init__(self, task=None):
super().__init__()
self.task = task
def _granularize(self, data_bundle, tag_map):
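        # Map each target label through `tag_map` and drop instances whose
        # label is not covered (marked with the -100 sentinel).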
for name in list(data_bundle.datasets.keys()):
dataset = data_bundle.get_dataset(name)
dataset.apply_field((lambda target: tag_map.get(target, (- 100))), field_name=Const.TARGET, new_field_name=Const.TARGET)
dataset.drop((lambda ins: (ins[Const.TARGET] == (- 100))))
data_bundle.set_dataset(dataset, name)
return data_bundle
def process(self, data_bundle: DataBundle):
task_tag_dict = {'XNLI': {'neutral': 0, 'entailment': 1, 'contradictory': 2, 'contradiction': 2}}
if (self.task in task_tag_dict):
data_bundle = self._granularize(data_bundle=data_bundle, tag_map=task_tag_dict[self.task])
else:
raise RuntimeError(f'Only support {task_tag_dict.keys()} task_tag_map.')
return data_bundle
|
def add_variables(field, variables):
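    # Extend `field` with the given variable names: enlarge the polynomial
    # ring underlying a fraction field if needed, otherwise build a new
    # fraction field of a polynomial ring over `field`.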
if (not variables):
return field
if is_FractionField(field):
R = field.ring()
if (is_PolynomialRing(R) or is_MPolynomialRing(R)):
new_variables = list(R.variable_names())
for v in variables:
if (v not in new_variables):
new_variables.append(v)
if (len(new_variables) > R.ngens()):
return PolynomialRing(R.base_ring(), new_variables).fraction_field()
else:
return field
new_variables = []
for v in variables:
if (v not in new_variables):
new_variables.append(v)
return PolynomialRing(field, new_variables).fraction_field()
|
def require_cython(test_case):
return unittest.skipUnless(is_cython_available(), 'test requires cython')(test_case)
|
@torch.no_grad()
def check_forward_equal_with_pytorch_float():
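    # Compare the CUDA MSDeformAttnFunction kernel with the pure-PyTorch
    # reference on random inputs (`shapes` and `level_start_index` are
    # defined at module level in the surrounding script).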
value = (torch.rand(N, S, M, D).cuda() * 0.01)
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = (torch.rand(N, Lq, M, L, P).cuda() + 1e-05)
attention_weights /= attention_weights.sum((- 1), keepdim=True).sum((- 2), keepdim=True)
im2col_step = 2
output_pytorch = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu()
output_cuda = MSDeformAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu()
fwdok = torch.allclose(output_cuda, output_pytorch, rtol=0.01, atol=0.001)
max_abs_err = (output_cuda - output_pytorch).abs().max()
max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
|
def simulate_policy():
file = './her-sac-fetch-experiment/her-sac-fetch-experiment_2020_07_07_11_11_14_0000--s-0/params.pkl'
data = torch.load(file)
policy = data['evaluation/policy']
policy.reset()
def policy_func(obs):
(a, agent_info) = policy.get_action(obs)
return a
task = generate_task(task_generator_id='reaching')
env = CausalWorld(task=task, enable_visualization=True, skip_frame=1, seed=0, max_episode_length=2500)
    env = CurriculumWrapper(env, intervention_actors=[GoalInterventionActorPolicy()], actives=[(0, 1000000000, 1, 0)])
for _ in range(100):
total_reward = 0
o = env.reset()
for _ in range(2500):
(o, reward, done, info) = env.step(policy_func(o))
total_reward += reward
print('total reward is :', total_reward)
env.close()
|
@dataclass
class Metadata():
platform: PlatformMetadata = field(default_factory=PlatformMetadata)
interpreter: InterpreterMetadata = field(default_factory=InterpreterMetadata)
cli: CliMetadata = field(default_factory=CliMetadata)
docker_image: (str | None) = field(default_factory=(lambda : os.getenv(DOCKER_IMAGE_ENV_VAR)))
|
def p_matrix(p):
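    # PLY yacc grammar action: record source line/column spans as debug info
    # and build an AST_Matrix node (empty for the 3-symbol production).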
(startl, endl) = p.linespan(0)
(startc, endc) = p.lexspan(0)
di0 = dace.dtypes.DebugInfo(startl, startc, endl, endc)
if (len(p) == 3):
p[0] = AST_Matrix(di0, [])
else:
p[0] = AST_Matrix(di0, p[2])
|
@RUNNERS.register_module()
class Runner(EpochBasedRunner):
def __init__(self, *args, **kwargs):
warnings.warn('Runner was deprecated, please use EpochBasedRunner instead')
super().__init__(*args, **kwargs)
|
def convert_to_float(value):
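    # Heuristically parse numbers whose ',' and '.' may be either thousands
    # separators or decimal points (e.g. '1,234.5', '1.234,5', '1,5').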
if isinstance(value, float):
return value
if isinstance(value, int):
return float(value)
if (not isinstance(value, str)):
raise ValueError("Argument value is not a string. Can't parse it as float")
sanitized = value
try:
if (('.' in sanitized) and (',' in sanitized)):
return float(sanitized.replace(',', ''))
if ((',' in sanitized) and _split_thousands(',', sanitized)):
return float(sanitized.replace(',', ''))
if ((',' in sanitized) and (sanitized.count(',') == 1) and (not _split_thousands(',', sanitized))):
return float(sanitized.replace(',', '.'))
if (sanitized.count('.') > 1):
return float(sanitized.replace('.', ''))
if (sanitized.count(',') > 1):
return float(sanitized.replace(',', ''))
return float(sanitized)
except ValueError:
raise ValueError('Unable to convert value to float')
|
class AudioNTT2020(AudioNTT2020Task6):
def __init__(self, n_mels=64, d=512):
super().__init__(n_mels=n_mels, d=d)
def forward(self, x):
x = super().forward(x)
(x1, _) = torch.max(x, dim=1)
x2 = torch.mean(x, dim=1)
x = (x1 + x2)
assert ((x.shape[1] == self.d) and (x.ndim == 2))
return x
|