Dataset schema (one record = the eight fields below, repeated per row):
  code        string, 66 – 870k chars
  docstring   string, 19 – 26.7k chars
  func_name   string, 1 – 138 chars
  language    string, 1 class
  repo        string, 7 – 68 chars
  path        string, 5 – 324 chars
  url         string, 46 – 389 chars
  license     string, 7 classes
def read_csv(csv_file, class_whitelist=None):
    """Loads boxes and class labels from a CSV file in the AVA format.

    CSV file format described at https://research.google.com/ava/download.html.

    Args:
        csv_file: A file object.
        class_whitelist: If provided, boxes corresponding to (integer) class
            labels not in this set are skipped.

    Returns:
        boxes: A dictionary mapping each unique image key (string) to a list
            of boxes, given as coordinates [y1, x1, y2, x2].
        labels: A dictionary mapping each unique image key (string) to a list
            of integer class labels, matching the corresponding box in
            `boxes`.
        scores: A dictionary mapping each unique image key (string) to a list
            of score values, matching the corresponding label in `labels`.
            If scores are not provided in the CSV, they default to 1.0.
    """
    entries = defaultdict(list)
    boxes = defaultdict(list)
    labels = defaultdict(list)
    scores = defaultdict(list)
    reader = csv.reader(csv_file)
    for row in reader:
        # str(row) instead of row: concatenating a list to a str would raise
        # a TypeError before the assertion message could be built.
        assert len(row) in [7, 8], 'Wrong number of columns: ' + str(row)
        image_key = make_image_key(row[0], row[1])
        x1, y1, x2, y2 = [float(n) for n in row[2:6]]
        action_id = int(row[6])
        if class_whitelist and action_id not in class_whitelist:
            continue
        score = 1.0
        if len(row) == 8:
            score = float(row[7])
        entries[image_key].append((score, action_id, y1, x1, y2, x2))
    for image_key in entries:
        # Evaluation API assumes boxes with descending scores
        entry = sorted(entries[image_key], key=lambda tup: -tup[0])
        boxes[image_key] = [x[2:] for x in entry]
        labels[image_key] = [x[1] for x in entry]
        scores[image_key] = [x[0] for x in entry]
    return boxes, labels, scores
Loads boxes and class labels from a CSV file in the AVA format. CSV file format described at https://research.google.com/ava/download.html. Args: csv_file: A file object. class_whitelist: If provided, boxes corresponding to (integer) class labels not in this set are skipped. Returns: boxes: A dictionary mapping each unique image key (string) to a list of boxes, given as coordinates [y1, x1, y2, x2]. labels: A dictionary mapping each unique image key (string) to a list of integer class labels, matching the corresponding box in `boxes`. scores: A dictionary mapping each unique image key (string) to a list of score values, matching the corresponding label in `labels`. If scores are not provided in the CSV, they default to 1.0.
read_csv
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py
Apache-2.0
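A minimal usage sketch for the record above (the CSV path and whitelist values are made up; read_csv and its make_image_key helper live in the module path listed in this record):

# Hypothetical usage: load AVA-format detections from disk.
from mmaction.evaluation.functional.ava_utils import read_csv

with open('ava_val_predictions.csv') as f:  # hypothetical file
    boxes, labels, scores = read_csv(f, class_whitelist={11, 12, 17})

for image_key, image_boxes in boxes.items():
    # Each key identifies a (video, timestamp); boxes are [y1, x1, y2, x2].
    print(image_key, len(image_boxes))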
def _import_ground_truth(ground_truth_filename):
    """Read ground truth file and return the ground truth instances and the
    activity classes.

    Args:
        ground_truth_filename (str): Full path to the ground truth json file.

    Returns:
        tuple[list, dict]: (ground_truth, activity_index).
            ground_truth contains the ground truth instances, each in dict
            format. activity_index contains the class indices.
    """
    with open(ground_truth_filename, 'r') as f:
        data = json.load(f)
    # Checking format
    activity_index, class_idx = {}, 0
    ground_truth = []
    for video_id, video_info in data.items():
        for anno in video_info['annotations']:
            if anno['label'] not in activity_index:
                activity_index[anno['label']] = class_idx
                class_idx += 1
            # old video_anno
            ground_truth_item = {}
            ground_truth_item['video-id'] = video_id[2:]
            ground_truth_item['t-start'] = float(anno['segment'][0])
            ground_truth_item['t-end'] = float(anno['segment'][1])
            ground_truth_item['label'] = activity_index[anno['label']]
            ground_truth.append(ground_truth_item)

    return ground_truth, activity_index
Read ground truth file and return the ground truth instances and the activity classes. Args: ground_truth_filename (str): Full path to the ground truth json file. Returns: tuple[list, dict]: (ground_truth, activity_index). ground_truth contains the ground truth instances, each in dict format. activity_index contains the class indices.
_import_ground_truth
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
def _import_prediction(self, prediction_filename):
    """Read prediction file and return the prediction instances.

    Args:
        prediction_filename (str): Full path to the prediction json file.

    Returns:
        List: List containing the prediction instances (dictionaries).
    """
    with open(prediction_filename, 'r') as f:
        data = json.load(f)

    # Read predictions.
    prediction = []
    for video_id, video_info in data['results'].items():
        for result in video_info:
            prediction_item = dict()
            prediction_item['video-id'] = video_id
            prediction_item['label'] = self.activity_index[result['label']]
            prediction_item['t-start'] = float(result['segment'][0])
            prediction_item['t-end'] = float(result['segment'][1])
            prediction_item['score'] = result['score']
            prediction.append(prediction_item)

    return prediction
Read prediction file and return the prediction instances. Args: prediction_filename (str): Full path to the prediction json file. Returns: List: List containing the prediction instances (dictionaries).
_import_prediction
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
def wrapper_compute_average_precision(self):
    """Computes average precision for each class."""
    ap = np.zeros((len(self.tiou_thresholds), len(self.activity_index)))

    # Adaptation to query faster
    ground_truth_by_label = []
    prediction_by_label = []
    for i in range(len(self.activity_index)):
        ground_truth_by_label.append([])
        prediction_by_label.append([])
    for gt in self.ground_truth:
        ground_truth_by_label[gt['label']].append(gt)
    for pred in self.prediction:
        prediction_by_label[pred['label']].append(pred)

    for i in range(len(self.activity_index)):
        ap_result = compute_average_precision_detection(
            ground_truth_by_label[i], prediction_by_label[i],
            self.tiou_thresholds)
        ap[:, i] = ap_result

    return ap
Computes average precision for each class.
wrapper_compute_average_precision
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
def evaluate(self):
    """Evaluates a prediction file.

    For the detection task we report the interpolated mean average
    precision as the performance measure.
    """
    self.ap = self.wrapper_compute_average_precision()
    self.mAP = self.ap.mean(axis=1)
    self.average_mAP = self.mAP.mean()
    return self.mAP, self.average_mAP
Evaluates a prediction file. For the detection task we report the interpolated mean average precision as the performance measure.
evaluate
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
def compute_average_precision_detection(ground_truth,
                                        prediction,
                                        tiou_thresholds=np.linspace(
                                            0.5, 0.95, 10)):
    """Compute average precision (detection task) between ground truth and
    prediction data frames. If multiple predictions occur for the same
    predicted segment, only the one with the highest score is matched as a
    true positive. This code is greatly inspired by the Pascal VOC devkit.

    Args:
        ground_truth (list[dict]): List containing the ground truth instances
            (dictionaries). Required keys are 'video-id', 't-start' and
            't-end'.
        prediction (list[dict]): List containing the prediction instances
            (dictionaries). Required keys are: 'video-id', 't-start', 't-end'
            and 'score'.
        tiou_thresholds (np.ndarray): A 1-d array of temporal intersection
            over union thresholds, which is optional.
            Default: ``np.linspace(0.5, 0.95, 10)``.

    Returns:
        np.ndarray: Average precision scores, one per tIoU threshold.
    """
    num_thresholds = len(tiou_thresholds)
    num_gts = len(ground_truth)
    num_preds = len(prediction)
    ap = np.zeros(num_thresholds)
    if len(prediction) == 0:
        return ap

    num_positive = float(num_gts)
    lock_gt = np.ones((num_thresholds, num_gts)) * -1
    # Sort predictions by decreasing score order.
    prediction.sort(key=lambda x: -x['score'])
    # Initialize true positive and false positive vectors.
    tp = np.zeros((num_thresholds, num_preds))
    fp = np.zeros((num_thresholds, num_preds))

    # Adaptation to query faster
    ground_truth_by_videoid = {}
    for i, item in enumerate(ground_truth):
        item['index'] = i
        ground_truth_by_videoid.setdefault(item['video-id'], []).append(item)

    # Assigning true positives to true ground truth instances.
    for idx, pred in enumerate(prediction):
        if pred['video-id'] in ground_truth_by_videoid:
            gts = ground_truth_by_videoid[pred['video-id']]
        else:
            fp[:, idx] = 1
            continue

        tiou_arr = pairwise_temporal_iou(
            np.array([pred['t-start'], pred['t-end']]),
            np.array([np.array([gt['t-start'], gt['t-end']]) for gt in gts]))
        tiou_arr = tiou_arr.reshape(-1)
        # We would like to retrieve the predictions with highest tiou score.
        tiou_sorted_idx = tiou_arr.argsort()[::-1]
        for t_idx, tiou_threshold in enumerate(tiou_thresholds):
            for j_idx in tiou_sorted_idx:
                if tiou_arr[j_idx] < tiou_threshold:
                    fp[t_idx, idx] = 1
                    break
                if lock_gt[t_idx, gts[j_idx]['index']] >= 0:
                    continue
                # Assign as true positive after the filters above.
                tp[t_idx, idx] = 1
                lock_gt[t_idx, gts[j_idx]['index']] = idx
                break

            if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
                fp[t_idx, idx] = 1

    tp_cumsum = np.cumsum(tp, axis=1).astype(np.float64)
    fp_cumsum = np.cumsum(fp, axis=1).astype(np.float64)
    recall_cumsum = tp_cumsum / num_positive
    precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)

    for t_idx in range(len(tiou_thresholds)):
        ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
                                                  recall_cumsum[t_idx, :])

    return ap
Compute average precision (detection task) between ground truth and prediction data frames. If multiple predictions occur for the same predicted segment, only the one with the highest score is matched as a true positive. This code is greatly inspired by the Pascal VOC devkit. Args: ground_truth (list[dict]): List containing the ground truth instances (dictionaries). Required keys are 'video-id', 't-start' and 't-end'. prediction (list[dict]): List containing the prediction instances (dictionaries). Required keys are: 'video-id', 't-start', 't-end' and 'score'. tiou_thresholds (np.ndarray): A 1-d array of temporal intersection over union thresholds, which is optional. Default: ``np.linspace(0.5, 0.95, 10)``. Returns: np.ndarray: Average precision scores, one per tIoU threshold.
compute_average_precision_detection
python
open-mmlab/mmaction2
mmaction/evaluation/functional/eval_detection.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/eval_detection.py
Apache-2.0
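The matching loop above keys on temporal IoU via the module's pairwise_temporal_iou helper, which this dump does not include; a self-contained toy sketch of the quantity it computes:

import numpy as np

def temporal_iou_sketch(seg, segs):
    """Toy temporal IoU between one [t0, t1] segment and an (N, 2) array.

    Illustrative only; mmaction2 has its own pairwise_temporal_iou helper.
    """
    inter = np.clip(
        np.minimum(seg[1], segs[:, 1]) - np.maximum(seg[0], segs[:, 0]),
        0, None)
    union = (seg[1] - seg[0]) + (segs[:, 1] - segs[:, 0]) - inter
    return inter / union

print(temporal_iou_sketch(np.array([1.0, 3.0]),
                          np.array([[2.0, 4.0], [5.0, 6.0]])))  # [0.333 0.]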
def overlap2d_voc(b1, b2):
    """Compute the overlaps between a set of boxes b1 and one box b2."""
    xmin = np.maximum(b1[:, 0], b2[:, 0])
    ymin = np.maximum(b1[:, 1], b2[:, 1])
    xmax = np.minimum(b1[:, 2], b2[:, 2])
    ymax = np.minimum(b1[:, 3], b2[:, 3])

    width = np.maximum(0, xmax - xmin)
    height = np.maximum(0, ymax - ymin)

    return width * height
Compute the overlaps between a set of boxes b1 and one box b2.
overlap2d_voc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
def iou2d_voc(b1, b2):
    """Compute the IoU between a set of boxes b1 and 1 box b2."""
    if b1.ndim == 1:
        b1 = b1[None, :]
    if b2.ndim == 1:
        b2 = b2[None, :]

    assert b2.shape[0] == 1

    ov = overlap2d_voc(b1, b2)

    return ov / (area2d_voc(b1) + area2d_voc(b2) - ov)
Compute the IoU between a set of boxes b1 and 1 box b2.
iou2d_voc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
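Both IoU helpers here call an area2d_voc function that is not part of this dump; a minimal sketch consistent with how it is used (boxes given as [x1, y1, x2, y2] rows):

import numpy as np

def area2d_voc(b):
    """Sketch of the area helper assumed by iou2d_voc/iou3d_voc:
    box areas for an (N, 4) array of [x1, y1, x2, y2] rows."""
    return (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])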
def iou3d_voc(b1, b2):
    """Compute the IoU between two tubes with same temporal extent."""
    assert b1.shape[0] == b2.shape[0]
    assert np.all(b1[:, 0] == b2[:, 0])

    ov = overlap2d_voc(b1[:, 1:5], b2[:, 1:5])

    return np.mean(
        ov / (area2d_voc(b1[:, 1:5]) + area2d_voc(b2[:, 1:5]) - ov))
Compute the IoU between two tubes with same temporal extent.
iou3d_voc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
def iou3dt_voc(b1, b2, spatialonly=False, temporalonly=False):
    """Compute the spatio-temporal IoU between two tubes."""
    tmin = max(b1[0, 0], b2[0, 0])
    tmax = min(b1[-1, 0], b2[-1, 0])

    if tmax < tmin:
        return 0.0

    temporal_inter = tmax - tmin
    temporal_union = max(b1[-1, 0], b2[-1, 0]) - min(b1[0, 0], b2[0, 0])

    tube1 = b1[int(np.where(
        b1[:, 0] == tmin)[0]):int(np.where(b1[:, 0] == tmax)[0]) + 1, :]
    tube2 = b2[int(np.where(
        b2[:, 0] == tmin)[0]):int(np.where(b2[:, 0] == tmax)[0]) + 1, :]

    if temporalonly:
        return temporal_inter / temporal_union
    return iou3d_voc(tube1, tube2) * (
        1. if spatialonly else temporal_inter / temporal_union)
Compute the spatio-temporal IoU between two tubes.
iou3dt_voc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
def nms_tubelets(dets, overlapThresh=0.3, top_k=None):
    """Compute the NMS for a set of scored tubelets.

    Scored tubelets are numpy arrays with 4K+1 columns, the last one being
    the score. Returns the kept tubelets (not their indices), sorted by
    decreasing score.
    """
    # If there are no detections, return an empty list
    if len(dets) == 0:
        return dets
    if top_k is None:
        top_k = len(dets)

    K = int((dets.shape[1] - 1) / 4)

    # Coordinates of bounding boxes
    x1 = [dets[:, 4 * k] for k in range(K)]
    y1 = [dets[:, 4 * k + 1] for k in range(K)]
    x2 = [dets[:, 4 * k + 2] for k in range(K)]
    y2 = [dets[:, 4 * k + 3] for k in range(K)]

    # Compute the area of the bounding boxes and sort the bounding
    # boxes by the bottom-right y-coordinate of the bounding box
    # area = (x2 - x1 + 1) * (y2 - y1 + 1)
    scores = dets[:, -1]
    area = [(x2[k] - x1[k] + 1) * (y2[k] - y1[k] + 1) for k in range(K)]
    order = np.argsort(scores)[::-1]
    weight = np.zeros_like(scores) + 1
    counter = 0

    while order.size > 0:
        i = order[0]
        counter += 1

        # Compute overlap
        xx1 = [np.maximum(x1[k][i], x1[k][order[1:]]) for k in range(K)]
        yy1 = [np.maximum(y1[k][i], y1[k][order[1:]]) for k in range(K)]
        xx2 = [np.minimum(x2[k][i], x2[k][order[1:]]) for k in range(K)]
        yy2 = [np.minimum(y2[k][i], y2[k][order[1:]]) for k in range(K)]

        w = [np.maximum(0, xx2[k] - xx1[k] + 1) for k in range(K)]
        h = [np.maximum(0, yy2[k] - yy1[k] + 1) for k in range(K)]

        inter_area = [w[k] * h[k] for k in range(K)]
        ious = sum([
            inter_area[k] /
            (area[k][order[1:]] + area[k][i] - inter_area[k])
            for k in range(K)
        ])
        index = np.where(ious > overlapThresh * K)[0]
        weight[order[index + 1]] = 1 - ious[index]

        index2 = np.where(ious <= overlapThresh * K)[0]
        order = order[index2 + 1]

    dets[:, -1] = dets[:, -1] * weight

    new_scores = dets[:, -1]
    new_order = np.argsort(new_scores)[::-1]
    dets = dets[new_order, :]

    return dets[:top_k, :]
Compute the NMS for a set of scored tubelets. Scored tubelets are numpy arrays with 4K+1 columns, the last one being the score. Returns the kept tubelets (not their indices), sorted by decreasing score.
nms_tubelets
python
open-mmlab/mmaction2
mmaction/evaluation/functional/multisports_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/multisports_utils.py
Apache-2.0
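A hypothetical usage sketch for nms_tubelets (all shapes and values are made up; the layout is K boxes of [x1, y1, x2, y2] per row plus a trailing score):

import numpy as np

K = 2  # tubelet length in frames (made up)
rng = np.random.default_rng(0)
tl = rng.uniform(0, 50, size=(10, 2 * K))       # top-left corners per frame
wh = rng.uniform(5, 30, size=(10, 2 * K))       # widths / heights per frame
dets = np.empty((10, 4 * K + 1))
dets[:, 0:4 * K:4] = tl[:, 0::2]                # x1
dets[:, 1:4 * K:4] = tl[:, 1::2]                # y1
dets[:, 2:4 * K:4] = tl[:, 0::2] + wh[:, 0::2]  # x2
dets[:, 3:4 * K:4] = tl[:, 1::2] + wh[:, 1::2]  # y2
dets[:, -1] = rng.uniform(0, 1, size=10)        # scores in the last column

kept = nms_tubelets(dets, overlapThresh=0.3, top_k=5)
print(kept.shape)  # at most (5, 4*K + 1), sorted by decayed score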
def compute_precision_recall(scores, labels, num_gt):
    """Compute precision and recall.

    Args:
        scores: A float numpy array representing detection score
        labels: A boolean numpy array representing true/false positive labels
        num_gt: Number of ground truth instances

    Raises:
        ValueError: if the input is not of the correct format

    Returns:
        precision: Fraction of positive instances over detected ones. This
            value is None if no ground truth labels are present.
        recall: Fraction of detected positive instances over all positive
            instances. This value is None if no ground truth labels are
            present.
    """
    if (not isinstance(labels, np.ndarray) or labels.dtype != bool
            or len(labels.shape) != 1):
        raise ValueError('labels must be single dimension bool numpy array')

    if not isinstance(scores, np.ndarray) or len(scores.shape) != 1:
        raise ValueError('scores must be single dimension numpy array')

    if num_gt < np.sum(labels):
        raise ValueError(
            'Number of true positives must be smaller than num_gt.')

    if len(scores) != len(labels):
        raise ValueError('scores and labels must be of the same size.')

    if num_gt == 0:
        return None, None

    sorted_indices = np.argsort(scores)
    sorted_indices = sorted_indices[::-1]
    labels = labels.astype(int)
    true_positive_labels = labels[sorted_indices]
    false_positive_labels = 1 - true_positive_labels
    cum_true_positives = np.cumsum(true_positive_labels)
    cum_false_positives = np.cumsum(false_positive_labels)
    precision = cum_true_positives.astype(float) / (
        cum_true_positives + cum_false_positives)
    recall = cum_true_positives.astype(float) / num_gt

    return precision, recall
Compute precision and recall. Args: scores: A float numpy array representing detection score labels: A boolean numpy array representing true/false positive labels num_gt: Number of ground truth instances Raises: ValueError: if the input is not of the correct format Returns: precision: Fraction of positive instances over detected ones. This value is None if no ground truth labels are present. recall: Fraction of detected positive instances over all positive instances. This value is None if no ground truth labels are present.
compute_precision_recall
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/metrics.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/metrics.py
Apache-2.0
def compute_average_precision(precision, recall):
    """Compute Average Precision according to the definition in VOCdevkit.

    Precision is modified to ensure that it does not decrease as recall
    decreases.

    Args:
        precision: A float [N, 1] numpy array of precisions
        recall: A float [N, 1] numpy array of recalls

    Raises:
        ValueError: if the input is not of the correct format

    Returns:
        average_precision: The area under the precision recall curve. NaN if
            precision and recall are None.
    """
    if precision is None:
        if recall is not None:
            raise ValueError('If precision is None, recall must also be None')
        return np.nan

    if not isinstance(precision, np.ndarray) or not isinstance(
            recall, np.ndarray):
        raise ValueError('precision and recall must be numpy array')
    if precision.dtype != np.float64 or recall.dtype != np.float64:
        raise ValueError('input must be float numpy array.')
    if len(precision) != len(recall):
        raise ValueError('precision and recall must be of the same size.')
    if not precision.size:
        return 0.0
    if np.amin(precision) < 0 or np.amax(precision) > 1:
        raise ValueError('Precision must be in the range of [0, 1].')
    if np.amin(recall) < 0 or np.amax(recall) > 1:
        raise ValueError('recall must be in the range of [0, 1].')
    if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
        raise ValueError('recall must be a non-decreasing array')

    recall = np.concatenate([[0], recall, [1]])
    precision = np.concatenate([[0], precision, [0]])

    # Preprocess precision to be a non-decreasing array
    for i in range(len(precision) - 2, -1, -1):
        precision[i] = np.maximum(precision[i], precision[i + 1])

    indices = np.where(recall[1:] != recall[:-1])[0] + 1
    average_precision = np.sum(
        (recall[indices] - recall[indices - 1]) * precision[indices])
    return average_precision
Compute Average Precision according to the definition in VOCdevkit. Precision is modified to ensure that it does not decrease as recall decreases. Args: precision: A float [N, 1] numpy array of precisions recall: A float [N, 1] numpy array of recalls Raises: ValueError: if the input is not of the correct format Returns: average_precision: The area under the precision recall curve. NaN if precision and recall are None.
compute_average_precision
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/metrics.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/metrics.py
Apache-2.0
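A toy end-to-end run of the two metric helpers above (values are made up; assumes compute_precision_recall and compute_average_precision from this metrics.py are in scope):

import numpy as np

# Five detections, three of them true positives, against four GT instances.
scores = np.array([0.9, 0.8, 0.7, 0.6, 0.5])
labels = np.array([True, False, True, True, False])

precision, recall = compute_precision_recall(scores, labels, num_gt=4)
ap = compute_average_precision(precision, recall)
print(precision, recall, ap)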
def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
    """Compute CorLoc according to the definition in the following paper.

    https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf

    Returns NaNs if there are no ground truth images for a class.

    Args:
        num_gt_imgs_per_class: 1D array, representing number of images
            containing at least one object instance of a particular class
        num_images_correctly_detected_per_class: 1D array, representing
            number of images in which at least one object instance of a
            particular class is correctly detected

    Returns:
        corloc_per_class: A float numpy array representing the CorLoc score
            of each class
    """
    # Divide by zero expected for classes with no gt examples.
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(
            num_gt_imgs_per_class == 0, np.nan,
            num_images_correctly_detected_per_class / num_gt_imgs_per_class)
Compute CorLoc according to the definition in the following paper. https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf Returns NaNs if there are no ground truth images for a class. Args: num_gt_imgs_per_class: 1D array, representing number of images containing at least one object instance of a particular class num_images_correctly_detected_per_class: 1D array, representing number of images in which at least one object instance of a particular class is correctly detected Returns: corloc_per_class: A float numpy array representing the CorLoc score of each class
compute_cor_loc
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/metrics.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/metrics.py
Apache-2.0
def __init__(self, data):
    """Constructs box collection.

    Args:
        data: a numpy array of shape [N, 4] representing box coordinates

    Raises:
        ValueError: if bbox data is not a numpy array
        ValueError: if invalid dimensions for bbox data
    """
    if not isinstance(data, np.ndarray):
        raise ValueError('data must be a numpy array.')
    if len(data.shape) != 2 or data.shape[1] != 4:
        raise ValueError('Invalid dimensions for box data.')
    if data.dtype != np.float32 and data.dtype != np.float64:
        raise ValueError(
            'Invalid data type for box data: float is required.')
    if not self._is_valid_boxes(data):
        raise ValueError('Invalid box data. data must be a numpy array of '
                         'N*[y_min, x_min, y_max, x_max]')
    self.data = {'boxes': data}
Constructs box collection. Args: data: a numpy array of shape [N, 4] representing box coordinates Raises: ValueError: if bbox data is not a numpy array ValueError: if invalid dimensions for bbox data
__init__
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
def add_field(self, field, field_data):
    """Add data to a specified field.

    Args:
        field: a string parameter used to specify a related field to be
            accessed.
        field_data: a numpy array of [N, ...] representing the data
            associated with the field.

    Raises:
        ValueError: if the field already exists or the dimension of the
            field data does not match the number of boxes.
    """
    if self.has_field(field):
        raise ValueError('Field ' + field + ' already exists')
    if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes():
        raise ValueError('Invalid dimensions for field data')
    self.data[field] = field_data
Add data to a specified field. Args: field: a string parameter used to specify a related field to be accessed. field_data: a numpy array of [N, ...] representing the data associated with the field. Raises: ValueError: if the field already exists or the dimension of the field data does not match the number of boxes.
add_field
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
def get_field(self, field):
    """Accesses data associated with the specified field in the box
    collection.

    Args:
        field: a string parameter used to specify a related field to be
            accessed.

    Returns:
        a numpy 1-d array representing data of an associated field

    Raises:
        ValueError: if invalid field
    """
    if not self.has_field(field):
        raise ValueError(f'field {field} does not exist')
    return self.data[field]
Accesses data associated with the specified field in the box collection. Args: field: a string parameter used to specify a related field to be accessed. Returns: a numpy 1-d array representing data of an associated field Raises: ValueError: if invalid field
get_field
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
def get_coordinates(self):
    """Get corner coordinates of boxes.

    Returns:
        a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
    """
    box_coordinates = self.get()
    y_min = box_coordinates[:, 0]
    x_min = box_coordinates[:, 1]
    y_max = box_coordinates[:, 2]
    x_max = box_coordinates[:, 3]
    return [y_min, x_min, y_max, x_max]
Get corner coordinates of boxes. Returns: a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
get_coordinates
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
def _is_valid_boxes(data):
    """Check whether data fulfills the format of N*[ymin, xmin, ymax, xmax].

    Args:
        data: a numpy array of shape [N, 4] representing box coordinates

    Returns:
        a boolean indicating whether all ymax of boxes are equal or greater
        than ymin, and all xmax of boxes are equal or greater than xmin.
    """
    if len(data) != 0:
        for v in data:
            if v[0] > v[2] or v[1] > v[3]:
                return False
    return True
Check whether data fulfills the format of N*[ymin, xmin, ymax, xmax]. Args: data: a numpy array of shape [N, 4] representing box coordinates Returns: a boolean indicating whether all ymax of boxes are equal or greater than ymin, and all xmax of boxes are equal or greater than xmin.
_is_valid_boxes
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_list.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_list.py
Apache-2.0
def ioa(boxes1, boxes2):
    """Computes pairwise intersection-over-area between box collections.

    Intersection-over-area (ioa) between two boxes box1 and box2 is defined
    as their intersection area over box2's area. Note that ioa is not
    symmetric, that is, IOA(box1, box2) != IOA(box2, box1).

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.

    Returns:
        a numpy array with shape [N, M] representing pairwise ioa scores.
    """
    intersect = intersection(boxes1, boxes2)
    areas = np.expand_dims(area(boxes2), axis=0)
    return intersect / areas
Computes pairwise intersection-over-area between box collections. Intersection-over-area (ioa) between two boxes box1 and box2 is defined as their intersection area over box2's area. Note that ioa is not symmetric, that is, IOA(box1, box2) != IOA(box2, box1). Args: boxes1: a numpy array with shape [N, 4] holding N boxes. boxes2: a numpy array with shape [M, 4] holding M boxes. Returns: a numpy array with shape [N, M] representing pairwise ioa scores.
ioa
python
open-mmlab/mmaction2
mmaction/evaluation/functional/ava_evaluation/np_box_ops.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_evaluation/np_box_ops.py
Apache-2.0
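ioa depends on intersection and area helpers from the same np_box_ops module, not included in this dump; a minimal sketch consistent with the [y_min, x_min, y_max, x_max] convention used above:

import numpy as np

def area(boxes):
    """Sketch: areas of an (N, 4) array of [y_min, x_min, y_max, x_max]."""
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

def intersection(boxes1, boxes2):
    """Sketch: pairwise (N, M) intersection areas of two box collections."""
    y_min1, x_min1, y_max1, x_max1 = np.split(boxes1, 4, axis=1)  # (N, 1)
    y_min2, x_min2, y_max2, x_max2 = np.split(boxes2, 4, axis=1)  # (M, 1)
    inter_h = np.maximum(
        0, np.minimum(y_max1, y_max2.T) - np.maximum(y_min1, y_min2.T))
    inter_w = np.maximum(
        0, np.minimum(x_max1, x_max2.T) - np.maximum(x_min1, x_min2.T))
    return inter_h * inter_w  # (N, M) via broadcasting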
def process(self, data_batch: Sequence[Tuple[Any, Dict]],
            data_samples: Sequence[Dict]) -> None:
    """Process one batch of data samples and predictions.

    The processed results should be stored in ``self.results``, which will
    be used to compute the metrics when all batches have been processed.

    Args:
        data_batch (Sequence[dict]): A batch of data from the dataloader.
        data_samples (Sequence[dict]): A batch of outputs from the model.
    """
    data_samples = copy.deepcopy(data_samples)
    for data_sample in data_samples:
        result = dict()
        pred = data_sample['pred_score']
        label = data_sample['gt_label']

        # Ad-hoc for RGBPoseConv3D
        if isinstance(pred, dict):
            for item_name, score in pred.items():
                pred[item_name] = score.cpu().numpy()
        else:
            pred = pred.cpu().numpy()

        result['pred'] = pred
        if label.size(0) == 1:
            # single-label
            result['label'] = label.item()
        else:
            # multi-label
            result['label'] = label.cpu().numpy()
        self.results.append(result)
Process one batch of data samples and predictions. The processed results should be stored in ``self.results``, which will be used to compute the metrics when all batches have been processed. Args: data_batch (Sequence[dict]): A batch of data from the dataloader. data_samples (Sequence[dict]): A batch of outputs from the model.
process
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/acc_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py
Apache-2.0
def calculate(self, preds: List[np.ndarray],
              labels: List[Union[int, np.ndarray]]) -> Dict:
    """Compute the metrics from processed results.

    Args:
        preds (list[np.ndarray]): List of the prediction scores.
        labels (list[int | np.ndarray]): List of the labels.

    Returns:
        dict: The computed metrics. The keys are the names of the metrics,
            and the values are corresponding results.
    """
    eval_results = OrderedDict()
    metric_options = copy.deepcopy(self.metric_options)
    for metric in self.metrics:
        if metric == 'top_k_accuracy':
            topk = metric_options.setdefault('top_k_accuracy',
                                             {}).setdefault('topk', (1, 5))

            if not isinstance(topk, (int, tuple)):
                raise TypeError('topk must be int or tuple of int, '
                                f'but got {type(topk)}')

            if isinstance(topk, int):
                topk = (topk, )

            top_k_acc = top_k_accuracy(preds, labels, topk)
            for k, acc in zip(topk, top_k_acc):
                eval_results[f'top{k}'] = acc

        if metric == 'mean_class_accuracy':
            mean1 = mean_class_accuracy(preds, labels)
            eval_results['mean1'] = mean1

        if metric in [
                'mean_average_precision',
                'mmit_mean_average_precision',
        ]:
            if metric == 'mean_average_precision':
                mAP = mean_average_precision(preds, labels)
                eval_results['mean_average_precision'] = mAP
            elif metric == 'mmit_mean_average_precision':
                mAP = mmit_mean_average_precision(preds, labels)
                eval_results['mmit_mean_average_precision'] = mAP

    return eval_results
Compute the metrics from processed results. Args: preds (list[np.ndarray]): List of the prediction scores. labels (list[int | np.ndarray]): List of the labels. Returns: dict: The computed metrics. The keys are the names of the metrics, and the values are corresponding results.
calculate
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/acc_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/acc_metric.py
Apache-2.0
def process(self, data_batch: Sequence[Tuple[Any, dict]],
            predictions: Sequence[dict]) -> None:
    """Process one batch of data samples and predictions.

    The processed results should be stored in ``self.results``, which will
    be used to compute the metrics when all batches have been processed.

    Args:
        data_batch (Sequence[Tuple[Any, dict]]): A batch of data from the
            dataloader.
        predictions (Sequence[dict]): A batch of outputs from the model.
    """
    for pred in predictions:
        self.results.append(pred)

    if self.metric_type == 'AR@AN':
        data_batch = data_batch['data_samples']
        for data_sample in data_batch:
            video_info = data_sample.metainfo
            video_id = video_info['video_name'][2:]
            this_video_gt = []
            for ann in video_info['annotations']:
                t_start, t_end = ann['segment']
                label = ann['label']
                this_video_gt.append([t_start, t_end, label])
            self.ground_truth[video_id] = np.array(this_video_gt)
Process one batch of data samples and predictions. The processed results should be stored in ``self.results``, which will be used to compute the metrics when all batches have been processed. Args: data_batch (Sequence[Tuple[Any, dict]]): A batch of data from the dataloader. predictions (Sequence[dict]): A batch of outputs from the model.
process
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/anet_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py
Apache-2.0
def compute_metrics(self, results: list) -> dict:
    """Compute the metrics from processed results.

    If `metric_type` is 'TEM', only dump middle results and do not compute
    any metrics.

    Args:
        results (list): The processed results of each batch.

    Returns:
        dict: The computed metrics. The keys are the names of the metrics,
            and the values are corresponding results.
    """
    self.dump_results(results)
    if self.metric_type == 'AR@AN':
        return self.compute_ARAN(results)
    return OrderedDict()
Compute the metrics from processed results. If `metric_type` is 'TEM', only dump middle results and do not compute any metrics. Args: results (list): The processed results of each batch. Returns: dict: The computed metrics. The keys are the names of the metrics, and the values are corresponding results.
compute_metrics
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/anet_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py
Apache-2.0
def dump_results(self, results, version='VERSION 1.3'):
    """Save middle or final results to disk."""
    if self.output_format == 'json':
        result_dict = self.proposals2json(results)
        output_dict = {
            'version': version,
            'results': result_dict,
            'external_data': {}
        }
        mmengine.dump(output_dict, self.out)
    elif self.output_format == 'csv':
        os.makedirs(self.out, exist_ok=True)
        header = 'action,start,end,tmin,tmax'
        for result in results:
            video_name, outputs = result
            output_path = osp.join(self.out, video_name + '.csv')
            np.savetxt(
                output_path,
                outputs,
                header=header,
                delimiter=',',
                comments='')
    else:
        raise ValueError(
            f'The output format {self.output_format} is not supported.')
Save middle or final results to disk.
dump_results
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/anet_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py
Apache-2.0
def proposals2json(results, show_progress=False):
    """Convert all proposals to a final dict(json) format.

    Args:
        results (list[dict]): All proposals.
        show_progress (bool): Whether to show the progress bar.
            Defaults to False.

    Returns:
        dict: The final result dict. E.g.

        .. code-block:: Python

            dict(video-1=[dict(segment=[1.1, 2.0], score=0.9),
                          dict(segment=[50.1, 129.3], score=0.6)])
    """
    result_dict = {}
    print('Convert proposals to json format')
    if show_progress:
        prog_bar = mmcv.ProgressBar(len(results))
    for result in results:
        video_name = result['video_name']
        result_dict[video_name[2:]] = result['proposal_list']
        if show_progress:
            prog_bar.update()
    return result_dict
Convert all proposals to a final dict(json) format. Args: results (list[dict]): All proposals. show_progress (bool): Whether to show the progress bar. Defaults to False. Returns: dict: The final result dict. E.g. .. code-block:: Python dict(video-1=[dict(segment=[1.1, 2.0], score=0.9), dict(segment=[50.1, 129.3], score=0.6)])
proposals2json
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/anet_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/anet_metric.py
Apache-2.0
def process(self, data_batch: Sequence[Tuple[Any, dict]],
            data_samples: Sequence[dict]) -> None:
    """Process one batch of data samples and predictions.

    The processed results should be stored in ``self.results``, which will
    be used to compute the metrics when all batches have been processed.

    Args:
        data_batch (Sequence[Tuple[Any, dict]]): A batch of data from the
            dataloader.
        data_samples (Sequence[dict]): A batch of outputs from the model.
    """
    for data_sample in data_samples:
        result = dict()
        pred = data_sample['pred_instances']
        result['video_id'] = data_sample['video_id']
        result['timestamp'] = data_sample['timestamp']
        outputs = bbox2result(
            pred['bboxes'],
            pred['scores'],
            num_classes=self.num_classes,
            thr=self.action_thr)
        result['outputs'] = outputs
        self.results.append(result)
Process one batch of data samples and predictions. The processed results should be stored in ``self.results``, which will be used to compute the metrics when all batches have been processed. Args: data_batch (Sequence[Tuple[Any, dict]]): A batch of data from the dataloader. data_samples (Sequence[dict]): A batch of outputs from the model.
process
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/ava_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/ava_metric.py
Apache-2.0
def compute_metrics(self, results: list) -> dict:
    """Compute the metrics from processed results.

    Args:
        results (list): The processed results of each batch.

    Returns:
        dict: The computed metrics. The keys are the names of the metrics,
            and the values are corresponding results.
    """
    time_now = datetime.now().strftime('%Y%m%d_%H%M%S')
    temp_file = f'AVA_{time_now}_result.csv'
    results2csv(results, temp_file, self.custom_classes)
    eval_results = ava_eval(
        temp_file,
        self.options[0],
        self.label_file,
        self.ann_file,
        self.exclude_file,
        ignore_empty_frames=True,
        custom_classes=self.custom_classes)
    os.remove(temp_file)
    return eval_results
Compute the metrics from processed results. Args: results (list): The processed results of each batch. Returns: dict: The computed metrics. The keys are the names of the metrics, and the values are corresponding results.
compute_metrics
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/ava_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/ava_metric.py
Apache-2.0
def process(self, data_batch: Optional[Dict],
            data_samples: Sequence[Dict]) -> None:
    """Process one batch of data samples and predictions.

    The processed results should be stored in ``self.results``, which will
    be used to compute the metrics when all batches have been processed.

    Args:
        data_batch (dict, optional): A batch of data from the dataloader.
        data_samples (Sequence[dict]): A batch of outputs from the model.
    """
    data_samples = copy.deepcopy(data_samples)
    for data_sample in data_samples:
        results = dict()
        features = data_sample['features']
        video_feature = features['video_feature'].cpu().numpy()
        text_feature = features['text_feature'].cpu().numpy()
        results['video_feature'] = video_feature
        results['text_feature'] = text_feature
        self.results.append(results)
Process one batch of data samples and predictions. The processed results should be stored in ``self.results``, which will be used to compute the metrics when all batches have been processed. Args: data_batch (dict, optional): A batch of data from the dataloader. data_samples (Sequence[dict]): A batch of outputs from the model.
process
python
open-mmlab/mmaction2
mmaction/evaluation/metrics/retrieval_metric.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/metrics/retrieval_metric.py
Apache-2.0
def _make_stem_layer(self) -> None:
    """Construct the stem layer, which consists of a conv+norm+act module
    and pooling layers."""
    self.conv1 = ConvModule(
        self.in_channels,
        64,
        kernel_size=7,
        stride=2,
        padding=3,
        bias=False,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    self.maxpool3d_1 = nn.MaxPool3d(
        kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 0, 0))
    self.maxpool3d_2 = nn.MaxPool3d(
        kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))
Construct the stem layer, which consists of a conv+norm+act module and pooling layers.
_make_stem_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/c2d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/c2d.py
Apache-2.0
def forward(self, x: torch.Tensor) \
        -> Union[torch.Tensor, Tuple[torch.Tensor]]:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        Union[torch.Tensor or Tuple[torch.Tensor]]: The feature of the
            input samples extracted by the backbone.
    """
    batches = x.shape[0]

    def _convert_to_2d(x: torch.Tensor) -> torch.Tensor:
        """(N, C, T, H, W) -> (N x T, C, H, W)"""
        x = x.permute((0, 2, 1, 3, 4))
        x = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4])
        return x

    def _convert_to_3d(x: torch.Tensor) -> torch.Tensor:
        """(N x T, C, H, W) -> (N, C, T, H, W)"""
        x = x.reshape(batches, -1, x.shape[1], x.shape[2], x.shape[3])
        x = x.permute((0, 2, 1, 3, 4))
        return x

    x = _convert_to_2d(x)
    x = self.conv1(x)
    x = _convert_to_3d(x)
    x = self.maxpool3d_1(x)
    x = _convert_to_2d(x)

    outs = []
    for i, layer_name in enumerate(self.res_layers):
        res_layer = getattr(self, layer_name)
        x = res_layer(x)
        if i == 0:
            x = _convert_to_3d(x)
            x = self.maxpool3d_2(x)
            x = _convert_to_2d(x)
        if i in self.out_indices:
            x = _convert_to_3d(x)
            outs.append(x)

    if len(outs) == 1:
        return outs[0]

    return tuple(outs)
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: Union[torch.Tensor or Tuple[torch.Tensor]]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/c2d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/c2d.py
Apache-2.0
def forward(self, x):
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.
            The size of x is (num_batches, 3, 16, 112, 112).

    Returns:
        torch.Tensor: The feature of the input samples extracted by the
            backbone.
    """
    x = self.conv1a(x)
    x = self.pool1(x)

    x = self.conv2a(x)
    x = self.pool2(x)

    x = self.conv3a(x)
    x = self.conv3b(x)
    x = self.pool3(x)

    x = self.conv4a(x)
    x = self.conv4b(x)
    x = self.pool4(x)

    x = self.conv5a(x)
    x = self.conv5b(x)
    x = self.pool5(x)

    x = x.flatten(start_dim=1)
    x = self.relu(self.fc6(x))
    x = self.dropout(x)
    x = self.relu(self.fc7(x))

    return x
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. The size of x is (num_batches, 3, 16, 112, 112). Returns: torch.Tensor: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/c3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/c3d.py
Apache-2.0
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    """Make divisible function.

    This function rounds the channel number to the nearest value that can
    be divisible by the divisor.

    Args:
        value (int): The original channel number.
        divisor (int): The divisor to fully divide the channel number.
        min_value (int, optional): The minimum value of the output channel.
            Defaults to None, which means the minimum value is equal to the
            divisor.
        min_ratio (float, optional): The minimum ratio of the rounded channel
            number to the original channel number. Defaults to 0.9.

    Returns:
        int: The modified output channel number
    """
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than (1-min_ratio).
    if new_value < min_ratio * value:
        new_value += divisor
    return new_value
Make divisible function. This function rounds the channel number to the nearest value that can be divisible by the divisor. Args: value (int): The original channel number. divisor (int): The divisor to fully divide the channel number. min_value (int, optional): The minimum value of the output channel. Defaults to None, which means the minimum value is equal to the divisor. min_ratio (float, optional): The minimum ratio of the rounded channel number to the original channel number. Defaults to 0.9. Returns: int: The modified output channel number
make_divisible
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py
Apache-2.0
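A quick check of the rounding behavior (inputs made up):

print(make_divisible(30, 8))   # 32: rounds to the nearest multiple of 8
print(make_divisible(34, 10))  # 40: 30 would be < 0.9 * 34, so bump up by divisor
print(make_divisible(3, 8))    # 8: clamped to min_value (defaults to divisor)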
def forward(self, x):
    """Defines the computation performed at every call.

    Args:
        x (Tensor): The input data.

    Returns:
        Tensor: The output of the module.
    """

    def _inner_forward(x):
        if self.use_res_connect:
            return x + self.conv(x)

        return self.conv(x)

    if self.with_cp and x.requires_grad:
        out = cp.checkpoint(_inner_forward, x)
    else:
        out = _inner_forward(x)

    return out
Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The output of the module.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py
Apache-2.0
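The with_cp branch above trades compute for memory via torch.utils.checkpoint; a standalone sketch of the same pattern with a made-up module:

import torch
import torch.nn as nn
import torch.utils.checkpoint as cp

class TinyBlock(nn.Module):
    """Made-up residual block showing the same checkpointing pattern."""

    def __init__(self, channels: int, with_cp: bool = True):
        super().__init__()
        self.conv = nn.Conv2d(channels, channels, 3, padding=1)
        self.with_cp = with_cp

    def forward(self, x):
        def _inner_forward(x):
            return x + self.conv(x)

        if self.with_cp and x.requires_grad:
            # Activations inside _inner_forward are recomputed during the
            # backward pass instead of being stored, saving memory.
            return cp.checkpoint(_inner_forward, x)
        return _inner_forward(x)

x = torch.randn(2, 8, 16, 16, requires_grad=True)
TinyBlock(8)(x).sum().backward()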
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
    """Stack InvertedResidual blocks to build a layer for MobileNetV2.

    Args:
        out_channels (int): out_channels of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Defaults to 1.
        expand_ratio (int): Expand the number of channels of the hidden
            layer in InvertedResidual by this ratio. Defaults to 6.
    """
    layers = []
    for i in range(num_blocks):
        if i >= 1:
            stride = 1
        layers.append(
            InvertedResidual(
                self.in_channels,
                out_channels,
                stride,
                expand_ratio=expand_ratio,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg,
                with_cp=self.with_cp))
        self.in_channels = out_channels

    return nn.Sequential(*layers)
Stack InvertedResidual blocks to build a layer for MobileNetV2. Args: out_channels (int): out_channels of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Defaults to 1 expand_ratio (int): Expand the number of channels of the hidden layer in InvertedResidual by this ratio. Defaults to 6.
make_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py
Apache-2.0
def forward(self, x):
    """Defines the computation performed at every call.

    Args:
        x (Tensor): The input data.

    Returns:
        Tensor or Tuple[Tensor]: The feature of the input samples extracted
            by the backbone.
    """
    x = self.conv1(x)

    outs = []
    for i, layer_name in enumerate(self.layers):
        layer = getattr(self, layer_name)
        x = layer(x)
        if i in self.out_indices:
            outs.append(x)

    if len(outs) == 1:
        return outs[0]

    return tuple(outs)
Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor or Tuple[Tensor]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py
Apache-2.0
def _freeze_stages(self):
    """Prevent all the parameters from being optimized before
    ``self.frozen_stages``."""
    if self.frozen_stages >= 0:
        self.conv1.eval()
        for param in self.conv1.parameters():
            param.requires_grad = False

    for i in range(1, self.frozen_stages + 1):
        layer_name = self.layers[i - 1]
        layer = getattr(self, layer_name)
        layer.eval()
        for param in layer.parameters():
            param.requires_grad = False
Prevent all the parameters from being optimized before ``self.frozen_stages``.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py
Apache-2.0
def train(self, mode=True):
    """Set the optimization status when training."""
    super(MobileNetV2, self).train(mode)
    self._freeze_stages()
    if mode and self.norm_eval:
        for m in self.modules():
            if isinstance(m, _BatchNorm):
                m.eval()
Set the optimization status when training.
train
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2.py
Apache-2.0
def make_temporal_shift(self):
    """Make temporal shift for some layers."""
    for m in self.modules():
        if isinstance(m, InvertedResidual) and \
                len(m.conv) == 3 and m.use_res_connect:
            m.conv[0] = TemporalShift(
                m.conv[0],
                num_segments=self.num_segments,
                shift_div=self.shift_div,
            )
Make temporal shift for some layers.
make_temporal_shift
python
open-mmlab/mmaction2
mmaction/models/backbones/mobilenet_v2_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobilenet_v2_tsm.py
Apache-2.0
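TemporalShift itself is not included in this dump; a toy sketch of the core TSM operation it wraps (shifting a fraction of channels one step along the time axis):

import torch

def temporal_shift_sketch(x: torch.Tensor, num_segments: int,
                          shift_div: int = 8) -> torch.Tensor:
    """Toy sketch of the TSM shift: x is (N*T, C, H, W) frame features.

    Shifts 1/shift_div of the channels one step backward in time, another
    1/shift_div forward, and leaves the rest untouched. Illustrative only;
    mmaction2's TemporalShift wraps a conv together with this operation.
    """
    nt, c, h, w = x.shape
    x = x.view(nt // num_segments, num_segments, c, h, w)
    fold = c // shift_div
    out = torch.zeros_like(x)
    out[:, :-1, :fold] = x[:, 1:, :fold]                   # shift backward
    out[:, 1:, fold:2 * fold] = x[:, :-1, fold:2 * fold]   # shift forward
    out[:, :, 2 * fold:] = x[:, :, 2 * fold:]              # no shift
    return out.view(nt, c, h, w)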
def make_temporal_shift(self):
    """Make temporal shift for some layers.

    To make reparameterization work, we can only build the shift layer
    before the 'block', instead of the 'blockres'.
    """

    def make_block_temporal(stage, num_segments):
        """Make temporal shift on some blocks.

        Args:
            stage (nn.Module): Model layers to be shifted.
            num_segments (int): Number of frame segments.

        Returns:
            nn.Module: The shifted blocks.
        """
        blocks = list(stage.children())
        for i, b in enumerate(blocks):
            blocks[i] = TemporalShift(
                b, num_segments=num_segments, shift_div=self.shift_div)
        return nn.Sequential(*blocks)

    self.stage0 = make_block_temporal(
        nn.Sequential(self.stage0), self.num_segments)[0]
    for i in range(1, 5):
        temporal_stage = make_block_temporal(
            getattr(self, f'stage{i}'), self.num_segments)
        setattr(self, f'stage{i}', temporal_stage)
Make temporal shift for some layers. To make reparameterization work, we can only build the shift layer before the 'block', instead of the 'blockres'
make_temporal_shift
python
open-mmlab/mmaction2
mmaction/models/backbones/mobileone_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobileone_tsm.py
Apache-2.0
def make_block_temporal(stage, num_segments):
    """Make temporal shift on some blocks.

    Args:
        stage (nn.Module): Model layers to be shifted.
        num_segments (int): Number of frame segments.

    Returns:
        nn.Module: The shifted blocks.
    """
    blocks = list(stage.children())
    for i, b in enumerate(blocks):
        blocks[i] = TemporalShift(
            b, num_segments=num_segments, shift_div=self.shift_div)
    return nn.Sequential(*blocks)
Make temporal shift on some blocks. Args: stage (nn.Module): Model layers to be shifted. num_segments (int): Number of frame segments. Returns: nn.Module: The shifted blocks.
make_block_temporal
python
open-mmlab/mmaction2
mmaction/models/backbones/mobileone_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mobileone_tsm.py
Apache-2.0
def resize_pos_embed(pos_embed: torch.Tensor,
                     src_shape: Tuple[int],
                     dst_shape: Tuple[int],
                     mode: str = 'trilinear',
                     num_extra_tokens: int = 1) -> torch.Tensor:
    """Resize pos_embed weights.

    Args:
        pos_embed (torch.Tensor): Position embedding weights with shape
            [1, L, C].
        src_shape (tuple): The resolution of downsampled origin training
            image, in format (T, H, W).
        dst_shape (tuple): The resolution of downsampled new training
            image, in format (T, H, W).
        mode (str): Algorithm used for upsampling. Choose one from 'nearest',
            'linear', 'bilinear', 'bicubic' and 'trilinear'.
            Defaults to 'trilinear'.
        num_extra_tokens (int): The number of extra tokens, such as
            cls_token. Defaults to 1.

    Returns:
        torch.Tensor: The resized pos_embed of shape [1, L_new, C]
    """
    if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1] \
            and src_shape[2] == dst_shape[2]:
        return pos_embed
    assert pos_embed.ndim == 3, 'shape of pos_embed must be [1, L, C]'
    _, L, C = pos_embed.shape
    src_t, src_h, src_w = src_shape
    assert L == src_t * src_h * src_w + num_extra_tokens, \
        f"The length of `pos_embed` ({L}) doesn't match the expected " \
        f'shape ({src_t}*{src_h}*{src_w}+{num_extra_tokens}).' \
        'Please check the `img_size` argument.'
    extra_tokens = pos_embed[:, :num_extra_tokens]

    src_weight = pos_embed[:, num_extra_tokens:]
    src_weight = src_weight.reshape(1, src_t, src_h, src_w,
                                    C).permute(0, 4, 1, 2, 3)

    dst_weight = F.interpolate(
        src_weight, size=dst_shape, align_corners=False, mode=mode)
    dst_weight = torch.flatten(dst_weight, 2).transpose(1, 2)

    return torch.cat((extra_tokens, dst_weight), dim=1)
Resize pos_embed weights. Args: pos_embed (torch.Tensor): Position embedding weights with shape [1, L, C]. src_shape (tuple): The resolution of downsampled origin training image, in format (T, H, W). dst_shape (tuple): The resolution of downsampled new training image, in format (T, H, W). mode (str): Algorithm used for upsampling. Choose one from 'nearest', 'linear', 'bilinear', 'bicubic' and 'trilinear'. Defaults to 'trilinear'. num_extra_tokens (int): The number of extra tokens, such as cls_token. Defaults to 1. Returns: torch.Tensor: The resized pos_embed of shape [1, L_new, C]
resize_pos_embed
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
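A hypothetical usage sketch for resize_pos_embed (shapes and the embedding dim are made up):

import torch

C = 96                                         # made-up embedding dim
src, dst = (2, 7, 7), (4, 14, 14)
pos_embed = torch.randn(1, 2 * 7 * 7 + 1, C)   # +1 for the cls token

resized = resize_pos_embed(pos_embed, src, dst)
print(resized.shape)  # torch.Size([1, 4*14*14 + 1, 96])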
def resize_decomposed_rel_pos(rel_pos: torch.Tensor, q_size: int,
                              k_size: int) -> torch.Tensor:
    """Get relative positional embeddings according to the relative
    positions of query and key sizes.

    Args:
        rel_pos (Tensor): relative position embeddings (L, C).
        q_size (int): size of query q.
        k_size (int): size of key k.

    Returns:
        Extracted positional embeddings according to relative positions.
    """
    max_rel_dist = int(2 * max(q_size, k_size) - 1)
    # Interpolate rel pos if needed.
    if rel_pos.shape[0] != max_rel_dist:
        # Interpolate rel pos.
        resized = F.interpolate(
            # (L, C) -> (1, C, L)
            rel_pos.transpose(0, 1).unsqueeze(0),
            size=max_rel_dist,
            mode='linear',
        )
        # (1, C, L) -> (L, C)
        resized = resized.squeeze(0).transpose(0, 1)
    else:
        resized = rel_pos

    # Scale the coords with short length if shapes for q and k are different.
    q_h_ratio = max(k_size / q_size, 1.0)
    k_h_ratio = max(q_size / k_size, 1.0)
    q_coords = torch.arange(q_size)[:, None] * q_h_ratio
    k_coords = torch.arange(k_size)[None, :] * k_h_ratio
    relative_coords = (q_coords - k_coords) + (k_size - 1) * k_h_ratio

    return resized[relative_coords.long()]
Get relative positional embeddings according to the relative positions of query and key sizes. Args: rel_pos (Tensor): relative position embeddings (L, C). q_size (int): size of query q. k_size (int): size of key k. Returns: Extracted positional embeddings according to relative positions.
resize_decomposed_rel_pos
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
def attention_pool(x: torch.Tensor,
                   pool: nn.Module,
                   in_size: Tuple[int],
                   with_cls_token: bool = False,
                   norm: Optional[nn.Module] = None) -> tuple:
    """Pooling the feature tokens.

    Args:
        x (torch.Tensor): The input tensor, should be with shape
            ``(B, num_heads, L, C)`` or ``(B, L, C)``.
        pool (nn.Module): The pooling module.
        in_size (Tuple[int]): The shape of the input feature map.
        with_cls_token (bool): Whether concatenating class token into video
            tokens as transformer input. Defaults to False.
        norm (nn.Module, optional): The normalization module.
            Defaults to None.
    """
    ndim = x.ndim
    if ndim == 4:
        B, num_heads, L, C = x.shape
    elif ndim == 3:
        num_heads = 1
        B, L, C = x.shape
        x = x.unsqueeze(1)
    else:
        raise RuntimeError(f'Unsupported input dimension {x.shape}')

    T, H, W = in_size
    assert L == T * H * W + with_cls_token

    if with_cls_token:
        cls_tok, x = x[:, :, :1, :], x[:, :, 1:, :]

    # (B, num_heads, T*H*W, C) -> (B*num_heads, C, T, H, W)
    x = x.reshape(B * num_heads, T, H, W,
                  C).permute(0, 4, 1, 2, 3).contiguous()
    x = pool(x)
    out_size = x.shape[2:]

    # (B*num_heads, C, T', H', W') -> (B, num_heads, T'*H'*W', C)
    x = x.reshape(B, num_heads, C, -1).transpose(2, 3)
    if with_cls_token:
        x = torch.cat((cls_tok, x), dim=2)
    if norm is not None:
        x = norm(x)

    if ndim == 3:
        x = x.squeeze(1)

    return x, out_size
Pooling the feature tokens. Args: x (torch.Tensor): The input tensor, should be with shape ``(B, num_heads, L, C)`` or ``(B, L, C)``. pool (nn.Module): The pooling module. in_size (Tuple[int]): The shape of the input feature map. with_cls_token (bool): Whether concatenating class token into video tokens as transformer input. Defaults to False. norm (nn.Module, optional): The normalization module. Defaults to None.
attention_pool
python
open-mmlab/mmaction2
mmaction/models/backbones/mvit.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/mvit.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The input data.

    Returns:
        torch.Tensor: The output of the module.
    """
    identity = x

    out = self.conv1(x)
    out = self.conv2(out)

    if self.downsample is not None:
        identity = self.downsample(x)

    out = out + identity
    out = self.relu(out)
    return out
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def make_res_layer(block: nn.Module,
                   inplanes: int,
                   planes: int,
                   blocks: int,
                   stride: int = 1,
                   dilation: int = 1,
                   style: str = 'pytorch',
                   conv_cfg: Optional[ConfigType] = None,
                   norm_cfg: Optional[ConfigType] = None,
                   act_cfg: Optional[ConfigType] = None,
                   with_cp: bool = False) -> nn.Module:
    """Build residual layer for ResNet.

    Args:
        block (nn.Module): Residual module to be built.
        inplanes (int): Number of channels for the input feature
            in each block.
        planes (int): Number of channels for the output feature
            in each block.
        blocks (int): Number of residual blocks.
        stride (int): Stride in the conv layer. Defaults to 1.
        dilation (int): Spacing between kernel elements. Defaults to 1.
        style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``, the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer. Defaults to ``pytorch``.
        conv_cfg (Union[dict, ConfigDict], optional): Config for conv layers.
            Defaults to None.
        norm_cfg (Union[dict, ConfigDict], optional): Config for norm layers.
            Defaults to None.
        act_cfg (Union[dict, ConfigDict], optional): Config for activate
            layers. Defaults to None.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Defaults to False.

    Returns:
        nn.Module: A residual layer for the given config.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = ConvModule(
            inplanes,
            planes * block.expansion,
            kernel_size=1,
            stride=stride,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

    layers = []
    layers.append(
        block(
            inplanes,
            planes,
            stride,
            dilation,
            downsample,
            style=style,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            with_cp=with_cp))
    inplanes = planes * block.expansion
    for _ in range(1, blocks):
        layers.append(
            block(
                inplanes,
                planes,
                1,
                dilation,
                style=style,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg,
                with_cp=with_cp))

    return nn.Sequential(*layers)
Build residual layer for ResNet.

Args:
    block (nn.Module): Residual module to be built.
    inplanes (int): Number of channels for the input feature
        in each block.
    planes (int): Number of channels for the output feature
        in each block.
    blocks (int): Number of residual blocks.
    stride (int): Stride in the conv layer. Defaults to 1.
    dilation (int): Spacing between kernel elements. Defaults to 1.
    style (str): ``pytorch`` or ``caffe``. If set to ``pytorch``, the
        stride-two layer is the 3x3 conv layer, otherwise the stride-two
        layer is the first 1x1 conv layer. Defaults to ``pytorch``.
    conv_cfg (Union[dict, ConfigDict], optional): Config for conv layers.
        Defaults to None.
    norm_cfg (Union[dict, ConfigDict], optional): Config for norm layers.
        Defaults to None.
    act_cfg (Union[dict, ConfigDict], optional): Config for activate
        layers. Defaults to None.
    with_cp (bool): Use checkpoint or not. Using checkpoint will save some
        memory while slowing down the training speed. Defaults to False.

Returns:
    nn.Module: A residual layer for the given config.
make_res_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def _load_conv_params(conv: nn.Module, state_dict_tv: OrderedDict, module_name_tv: str, loaded_param_names: List[str]) -> None: """Load the conv parameters of resnet from torchvision. Args: conv (nn.Module): The destination conv module. state_dict_tv (OrderedDict): The state dict of pretrained torchvision model. module_name_tv (str): The name of corresponding conv module in the torchvision model. loaded_param_names (list[str]): List of parameters that have been loaded. """ weight_tv_name = module_name_tv + '.weight' if conv.weight.data.shape == state_dict_tv[weight_tv_name].shape: conv.weight.data.copy_(state_dict_tv[weight_tv_name]) loaded_param_names.append(weight_tv_name) if getattr(conv, 'bias') is not None: bias_tv_name = module_name_tv + '.bias' if conv.bias.data.shape == state_dict_tv[bias_tv_name].shape: conv.bias.data.copy_(state_dict_tv[bias_tv_name]) loaded_param_names.append(bias_tv_name)
Load the conv parameters of resnet from torchvision. Args: conv (nn.Module): The destination conv module. state_dict_tv (OrderedDict): The state dict of pretrained torchvision model. module_name_tv (str): The name of corresponding conv module in the torchvision model. loaded_param_names (list[str]): List of parameters that have been loaded.
_load_conv_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def _load_bn_params(bn: nn.Module, state_dict_tv: OrderedDict, module_name_tv: str, loaded_param_names: List[str]) -> None: """Load the bn parameters of resnet from torchvision. Args: bn (nn.Module): The destination bn module. state_dict_tv (OrderedDict): The state dict of pretrained torchvision model. module_name_tv (str): The name of corresponding bn module in the torchvision model. loaded_param_names (list[str]): List of parameters that have been loaded. """ for param_name, param in bn.named_parameters(): param_tv_name = f'{module_name_tv}.{param_name}' param_tv = state_dict_tv[param_tv_name] if param.data.shape == param_tv.shape: param.data.copy_(param_tv) loaded_param_names.append(param_tv_name) for param_name, param in bn.named_buffers(): param_tv_name = f'{module_name_tv}.{param_name}' # some buffers like num_batches_tracked may not exist if param_tv_name in state_dict_tv: param_tv = state_dict_tv[param_tv_name] if param.data.shape == param_tv.shape: param.data.copy_(param_tv) loaded_param_names.append(param_tv_name)
Load the bn parameters of resnet from torchvision. Args: bn (nn.Module): The destination bn module. state_dict_tv (OrderedDict): The state dict of pretrained torchvision model. module_name_tv (str): The name of corresponding bn module in the torchvision model. loaded_param_names (list[str]): List of parameters that have been loaded.
_load_bn_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def _load_torchvision_checkpoint(self, logger: mmengine.MMLogger = None) -> None: """Initiate the parameters from torchvision pretrained checkpoint.""" state_dict_torchvision = _load_checkpoint( self.pretrained, map_location='cpu') if 'state_dict' in state_dict_torchvision: state_dict_torchvision = state_dict_torchvision['state_dict'] loaded_param_names = [] for name, module in self.named_modules(): if isinstance(module, ConvModule): # we use a ConvModule to wrap conv+bn+relu layers, thus the # name mapping is needed if 'downsample' in name: # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0 original_conv_name = name + '.0' # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1 original_bn_name = name + '.1' else: # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n} original_conv_name = name # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n} original_bn_name = name.replace('conv', 'bn') self._load_conv_params(module.conv, state_dict_torchvision, original_conv_name, loaded_param_names) self._load_bn_params(module.bn, state_dict_torchvision, original_bn_name, loaded_param_names) # check if any parameters in the 2d checkpoint are not loaded remaining_names = set( state_dict_torchvision.keys()) - set(loaded_param_names) if remaining_names: logger.info( f'These parameters in pretrained checkpoint are not loaded' f': {remaining_names}')
Initiate the parameters from torchvision pretrained checkpoint.
_load_torchvision_checkpoint
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet.py
Apache-2.0
def forward(self, x): """Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The feature of the input samples extracted by the backbone. """ x = self.conv1(x) x = self.maxpool(x) for layer_name in self.res_layers: res_layer = getattr(self, layer_name) # no pool2 in R(2+1)d x = res_layer(x) return x
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet2plus1d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet2plus1d.py
Apache-2.0
def make_res_layer(block: nn.Module,
                   inplanes: int,
                   planes: int,
                   blocks: int,
                   spatial_stride: Union[int, Sequence[int]] = 1,
                   temporal_stride: Union[int, Sequence[int]] = 1,
                   dilation: int = 1,
                   style: str = 'pytorch',
                   inflate: Union[int, Sequence[int]] = 1,
                   inflate_style: str = '3x1x1',
                   non_local: Union[int, Sequence[int]] = 0,
                   non_local_cfg: Dict = dict(),
                   norm_cfg: Optional[Dict] = None,
                   act_cfg: Optional[Dict] = None,
                   conv_cfg: Optional[Dict] = None,
                   with_cp: bool = False,
                   **kwargs) -> nn.Module:
    """Build residual layer for ResNet3D.

    Args:
        block (nn.Module): Residual module to be built.
        inplanes (int): Number of channels for the input feature
            in each block.
        planes (int): Number of channels for the output feature
            in each block.
        blocks (int): Number of residual blocks.
        spatial_stride (int | Sequence[int]): Spatial strides in
            residual and conv layers. Defaults to 1.
        temporal_stride (int | Sequence[int]): Temporal strides in
            residual and conv layers. Defaults to 1.
        dilation (int): Spacing between kernel elements. Defaults to 1.
        style (str): 'pytorch' or 'caffe'. If set to 'pytorch', the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer. Defaults to ``'pytorch'``.
        inflate (int | Sequence[int]): Determine whether to inflate
            for each block. Defaults to 1.
        inflate_style (str): ``3x1x1`` or ``3x3x3``, which determines
            the kernel sizes and padding strides for conv1 and conv2
            in each block. Defaults to ``'3x1x1'``.
        non_local (int | Sequence[int]): Determine whether to apply
            non-local module in the corresponding block of each stage.
            Defaults to 0.
        non_local_cfg (dict): Config for non-local module.
            Defaults to ``dict()``.
        conv_cfg (dict, optional): Config for conv layers. Defaults to None.
        norm_cfg (dict, optional): Config for norm layers. Defaults to None.
        act_cfg (dict, optional): Config for activate layers.
            Defaults to None.
        with_cp (bool, optional): Use checkpoint or not. Using checkpoint
            will save some memory while slowing down the training speed.
            Defaults to False.

    Returns:
        nn.Module: A residual layer for the given config.
    """
    inflate = inflate if not isinstance(inflate, int) \
        else (inflate, ) * blocks
    non_local = non_local if not isinstance(non_local, int) \
        else (non_local, ) * blocks
    assert len(inflate) == blocks and len(non_local) == blocks
    downsample = None
    if spatial_stride != 1 or inplanes != planes * block.expansion:
        downsample = ConvModule(
            inplanes,
            planes * block.expansion,
            kernel_size=1,
            stride=(temporal_stride, spatial_stride, spatial_stride),
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None)

    layers = []
    layers.append(
        block(
            inplanes,
            planes,
            spatial_stride=spatial_stride,
            temporal_stride=temporal_stride,
            dilation=dilation,
            downsample=downsample,
            style=style,
            inflate=(inflate[0] == 1),
            inflate_style=inflate_style,
            non_local=(non_local[0] == 1),
            non_local_cfg=non_local_cfg,
            norm_cfg=norm_cfg,
            conv_cfg=conv_cfg,
            act_cfg=act_cfg,
            with_cp=with_cp,
            **kwargs))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(
                inplanes,
                planes,
                spatial_stride=1,
                temporal_stride=1,
                dilation=dilation,
                style=style,
                inflate=(inflate[i] == 1),
                inflate_style=inflate_style,
                non_local=(non_local[i] == 1),
                non_local_cfg=non_local_cfg,
                norm_cfg=norm_cfg,
                conv_cfg=conv_cfg,
                act_cfg=act_cfg,
                with_cp=with_cp,
                **kwargs))

    return Sequential(*layers)
Build residual layer for ResNet3D.

Args:
    block (nn.Module): Residual module to be built.
    inplanes (int): Number of channels for the input feature
        in each block.
    planes (int): Number of channels for the output feature
        in each block.
    blocks (int): Number of residual blocks.
    spatial_stride (int | Sequence[int]): Spatial strides in
        residual and conv layers. Defaults to 1.
    temporal_stride (int | Sequence[int]): Temporal strides in
        residual and conv layers. Defaults to 1.
    dilation (int): Spacing between kernel elements. Defaults to 1.
    style (str): 'pytorch' or 'caffe'. If set to 'pytorch', the
        stride-two layer is the 3x3 conv layer, otherwise the stride-two
        layer is the first 1x1 conv layer. Defaults to ``'pytorch'``.
    inflate (int | Sequence[int]): Determine whether to inflate
        for each block. Defaults to 1.
    inflate_style (str): ``3x1x1`` or ``3x3x3``, which determines
        the kernel sizes and padding strides for conv1 and conv2
        in each block. Defaults to ``'3x1x1'``.
    non_local (int | Sequence[int]): Determine whether to apply
        non-local module in the corresponding block of each stage.
        Defaults to 0.
    non_local_cfg (dict): Config for non-local module.
        Defaults to ``dict()``.
    conv_cfg (dict, optional): Config for conv layers. Defaults to None.
    norm_cfg (dict, optional): Config for norm layers. Defaults to None.
    act_cfg (dict, optional): Config for activate layers. Defaults to None.
    with_cp (bool, optional): Use checkpoint or not. Using checkpoint
        will save some memory while slowing down the training speed.
        Defaults to False.

Returns:
    nn.Module: A residual layer for the given config.
make_res_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _inflate_conv_params(conv3d: nn.Module, state_dict_2d: OrderedDict, module_name_2d: str, inflated_param_names: List[str]) -> None: """Inflate a conv module from 2d to 3d. Args: conv3d (nn.Module): The destination conv3d module. state_dict_2d (OrderedDict): The state dict of pretrained 2d model. module_name_2d (str): The name of corresponding conv module in the 2d model. inflated_param_names (list[str]): List of parameters that have been inflated. """ weight_2d_name = module_name_2d + '.weight' conv2d_weight = state_dict_2d[weight_2d_name] kernel_t = conv3d.weight.data.shape[2] new_weight = conv2d_weight.data.unsqueeze(2).expand_as( conv3d.weight) / kernel_t conv3d.weight.data.copy_(new_weight) inflated_param_names.append(weight_2d_name) if getattr(conv3d, 'bias') is not None: bias_2d_name = module_name_2d + '.bias' conv3d.bias.data.copy_(state_dict_2d[bias_2d_name]) inflated_param_names.append(bias_2d_name)
Inflate a conv module from 2d to 3d. Args: conv3d (nn.Module): The destination conv3d module. state_dict_2d (OrderedDict): The state dict of pretrained 2d model. module_name_2d (str): The name of corresponding conv module in the 2d model. inflated_param_names (list[str]): List of parameters that have been inflated.
_inflate_conv_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
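A standalone sketch of the inflation rule used above, with illustrative shapes: repeating a 2d kernel kernel_t times along the new temporal axis and dividing by kernel_t preserves the 2d response for inputs that are constant in time.

import torch

w2d = torch.randn(64, 3, 7, 7)                 # conv2d weight (O, I, H, W)
kernel_t = 5
w3d = w2d.unsqueeze(2).expand(64, 3, kernel_t, 7, 7) / kernel_t
assert torch.allclose(w3d.sum(dim=2), w2d)     # temporal sum recovers w2d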
def _inflate_bn_params(bn3d: nn.Module, state_dict_2d: OrderedDict,
                       module_name_2d: str,
                       inflated_param_names: List[str]) -> None:
    """Inflate a norm module from 2d to 3d.

    Args:
        bn3d (nn.Module): The destination bn3d module.
        state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
        module_name_2d (str): The name of corresponding bn module in the
            2d model.
        inflated_param_names (list[str]): List of parameters that have been
            inflated.
    """
    for param_name, param in bn3d.named_parameters():
        param_2d_name = f'{module_name_2d}.{param_name}'
        param_2d = state_dict_2d[param_2d_name]
        if param.data.shape != param_2d.shape:
            warnings.warn(f'The parameter of {module_name_2d} is not '
                          'loaded due to incompatible shapes. ')
            return

        param.data.copy_(param_2d)
        inflated_param_names.append(param_2d_name)

    for param_name, param in bn3d.named_buffers():
        param_2d_name = f'{module_name_2d}.{param_name}'
        # some buffers like num_batches_tracked may not exist in old
        # checkpoints
        if param_2d_name in state_dict_2d:
            param_2d = state_dict_2d[param_2d_name]
            param.data.copy_(param_2d)
            inflated_param_names.append(param_2d_name)
Inflate a norm module from 2d to 3d. Args: bn3d (nn.Module): The destination bn3d module. state_dict_2d (OrderedDict): The state dict of pretrained 2d model. module_name_2d (str): The name of corresponding bn module in the 2d model. inflated_param_names (list[str]): List of parameters that have been inflated.
_inflate_bn_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _inflate_weights(self, logger: MMLogger) -> None:
    """Inflate the resnet2d parameters to resnet3d.

    The differences between resnet3d and resnet2d mainly lie in an extra
    axis of conv kernel. To utilize the pretrained parameters in 2d model,
    the weight of conv2d models should be inflated to fit in the shapes of
    the 3d counterpart.

    Args:
        logger (MMLogger): The logger used to print
            debugging information.
    """
    state_dict_r2d = _load_checkpoint(self.pretrained, map_location='cpu')
    if 'state_dict' in state_dict_r2d:
        state_dict_r2d = state_dict_r2d['state_dict']

    inflated_param_names = []
    for name, module in self.named_modules():
        if isinstance(module, ConvModule):
            # we use a ConvModule to wrap conv+bn+relu layers, thus the
            # name mapping is needed
            if 'downsample' in name:
                # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
                original_conv_name = name + '.0'
                # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
                original_bn_name = name + '.1'
            else:
                # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
                original_conv_name = name
                # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
                original_bn_name = name.replace('conv', 'bn')
            if original_conv_name + '.weight' not in state_dict_r2d:
                logger.warning(f'Module does not exist in the '
                               f'state_dict_r2d: {original_conv_name}')
            else:
                shape_2d = state_dict_r2d[original_conv_name +
                                          '.weight'].shape
                shape_3d = module.conv.weight.data.shape
                if shape_2d != shape_3d[:2] + shape_3d[3:]:
                    logger.warning(f'Weight shape mismatch for '
                                   f'{original_conv_name}: '
                                   f'3d weight shape: {shape_3d}; '
                                   f'2d weight shape: {shape_2d}. ')
                else:
                    self._inflate_conv_params(module.conv, state_dict_r2d,
                                              original_conv_name,
                                              inflated_param_names)

            if original_bn_name + '.weight' not in state_dict_r2d:
                logger.warning(f'Module does not exist in the '
                               f'state_dict_r2d: {original_bn_name}')
            else:
                self._inflate_bn_params(module.bn, state_dict_r2d,
                                        original_bn_name,
                                        inflated_param_names)

    # check if any parameters in the 2d checkpoint are not loaded
    remaining_names = set(
        state_dict_r2d.keys()) - set(inflated_param_names)
    if remaining_names:
        logger.info(f'These parameters in the 2d checkpoint are not loaded'
                    f': {remaining_names}')
Inflate the resnet2d parameters to resnet3d. The differences between resnet3d and resnet2d mainly lie in an extra axis of conv kernel. To utilize the pretrained parameters in 2d model, the weight of conv2d models should be inflated to fit in the shapes of the 3d counterpart. Args: logger (MMLogger): The logger used to print debugging information.
_inflate_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def _init_weights(self, pretrained: Optional[str] = None) -> None: """Initiate the parameters either from existing checkpoint or from scratch. Args: pretrained (str | None): The path of the pretrained weight. Will override the original `pretrained` if set. The arg is added to be compatible with mmdet. Defaults to None. """ if pretrained: self.pretrained = pretrained if isinstance(self.pretrained, str): logger = MMLogger.get_current_instance() logger.info(f'load model from: {self.pretrained}') if self.pretrained2d: # Inflate 2D model into 3D model. self.inflate_weights(logger) else: # Directly load 3D model. load_checkpoint( self, self.pretrained, strict=False, logger=logger) elif self.pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv3d): kaiming_init(m) elif isinstance(m, _BatchNorm): constant_init(m, 1) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck3d): constant_init(m.conv3.bn, 0) elif isinstance(m, BasicBlock3d): constant_init(m.conv2.bn, 0) else: raise TypeError('pretrained must be a str or None')
Initiate the parameters either from existing checkpoint or from scratch. Args: pretrained (str | None): The path of the pretrained weight. Will override the original `pretrained` if set. The arg is added to be compatible with mmdet. Defaults to None.
_init_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def forward(self, x: torch.Tensor) \ -> Union[torch.Tensor, Tuple[torch.Tensor]]: """Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor or tuple[torch.Tensor]: The feature of the input samples extracted by the backbone. """ x = self.conv1(x) if self.with_pool1: x = self.maxpool(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) x = res_layer(x) if i == 0 and self.with_pool2: x = self.pool2(x) if i in self.out_indices: outs.append(x) if len(outs) == 1: return outs[0] return tuple(outs)
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor or tuple[torch.Tensor]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The feature of the input samples extracted by the residual layer. """ res_layer = getattr(self, self.layer_name) out = res_layer(x) return out
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The feature of the input samples extracted by the residual layer.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d.py
Apache-2.0
def inflate_weights(self, logger: MMLogger) -> None:
    """Inflate the resnet2d parameters to resnet3d pathway.

    The differences between resnet3d and resnet2d mainly lie in an extra
    axis of conv kernel. To utilize the pretrained parameters in 2d model,
    the weight of conv2d models should be inflated to fit in the shapes of
    the 3d counterpart. For pathway the ``lateral_connection`` part should
    not be inflated from 2d weights.

    Args:
        logger (MMLogger): The logger used to print
            debugging information.
    """
    state_dict_r2d = _load_checkpoint(self.pretrained, map_location='cpu')
    if 'state_dict' in state_dict_r2d:
        state_dict_r2d = state_dict_r2d['state_dict']

    inflated_param_names = []
    for name, module in self.named_modules():
        if 'lateral' in name:
            continue
        if isinstance(module, ConvModule):
            # we use a ConvModule to wrap conv+bn+relu layers, thus the
            # name mapping is needed
            if 'downsample' in name:
                # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
                original_conv_name = name + '.0'
                # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
                original_bn_name = name + '.1'
            else:
                # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
                original_conv_name = name
                # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
                original_bn_name = name.replace('conv', 'bn')
            if original_conv_name + '.weight' not in state_dict_r2d:
                logger.warning(f'Module does not exist in the '
                               f'state_dict_r2d: {original_conv_name}')
            else:
                self._inflate_conv_params(module.conv, state_dict_r2d,
                                          original_conv_name,
                                          inflated_param_names)
            if original_bn_name + '.weight' not in state_dict_r2d:
                logger.warning(f'Module does not exist in the '
                               f'state_dict_r2d: {original_bn_name}')
            else:
                self._inflate_bn_params(module.bn, state_dict_r2d,
                                        original_bn_name,
                                        inflated_param_names)

    # check if any parameters in the 2d checkpoint are not loaded
    remaining_names = set(
        state_dict_r2d.keys()) - set(inflated_param_names)
    if remaining_names:
        logger.info(f'These parameters in the 2d checkpoint are not loaded'
                    f': {remaining_names}')
Inflate the resnet2d parameters to resnet3d pathway. The differences between resnet3d and resnet2d mainly lie in an extra axis of conv kernel. To utilize the pretrained parameters in 2d model, the weight of conv2d models should be inflated to fit in the shapes of the 3d counterpart. For pathway the ``lateral_connection`` part should not be inflated from 2d weights. Args: logger (MMLogger): The logger used to print debugging information.
inflate_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def _inflate_conv_params(self, conv3d: nn.Module,
                         state_dict_2d: OrderedDict, module_name_2d: str,
                         inflated_param_names: List[str]) -> None:
    """Inflate a conv module from 2d to 3d.

    The differences of conv modules between 2d and 3d in Pathway mainly
    lie in the inplanes due to lateral connections. To fit the shapes of
    the lateral connection counterpart, it will expand parameters by
    concatenating conv2d parameters and extra zero paddings.

    Args:
        conv3d (nn.Module): The destination conv3d module.
        state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
        module_name_2d (str): The name of corresponding conv module in the
            2d model.
        inflated_param_names (list[str]): List of parameters that have
            been inflated.
    """
    weight_2d_name = module_name_2d + '.weight'
    conv2d_weight = state_dict_2d[weight_2d_name]
    old_shape = conv2d_weight.shape
    new_shape = conv3d.weight.data.shape
    kernel_t = new_shape[2]

    if new_shape[1] != old_shape[1]:
        if new_shape[1] < old_shape[1]:
            warnings.warn(f'The parameter of {module_name_2d} is not '
                          'loaded due to incompatible shapes. ')
            return
        # Inplanes may be different due to lateral connections
        new_channels = new_shape[1] - old_shape[1]
        pad_shape = old_shape
        pad_shape = pad_shape[:1] + (new_channels, ) + pad_shape[2:]
        # Expand parameters by concat extra channels
        conv2d_weight = torch.cat(
            (conv2d_weight,
             torch.zeros(pad_shape).type_as(conv2d_weight).to(
                 conv2d_weight.device)),
            dim=1)

    new_weight = conv2d_weight.data.unsqueeze(2).expand_as(
        conv3d.weight) / kernel_t
    conv3d.weight.data.copy_(new_weight)
    inflated_param_names.append(weight_2d_name)

    if getattr(conv3d, 'bias') is not None:
        bias_2d_name = module_name_2d + '.bias'
        conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
        inflated_param_names.append(bias_2d_name)
Inflate a conv module from 2d to 3d.

The differences of conv modules between 2d and 3d in Pathway mainly lie
in the inplanes due to lateral connections. To fit the shapes of the
lateral connection counterpart, it will expand parameters by
concatenating conv2d parameters and extra zero paddings.

Args:
    conv3d (nn.Module): The destination conv3d module.
    state_dict_2d (OrderedDict): The state dict of pretrained 2d model.
    module_name_2d (str): The name of corresponding conv module in the
        2d model.
    inflated_param_names (list[str]): List of parameters that have been
        inflated.
_inflate_conv_params
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def _freeze_stages(self) -> None: """Prevent all the parameters from being optimized before `self.frozen_stages`.""" if self.frozen_stages >= 0: self.conv1.eval() for param in self.conv1.parameters(): param.requires_grad = False for i in range(1, self.frozen_stages + 1): m = getattr(self, f'layer{i}') m.eval() for param in m.parameters(): param.requires_grad = False if i != len(self.res_layers) and self.lateral: # No fusion needed in the final stage lateral_name = self.lateral_connections[i - 1] conv_lateral = getattr(self, lateral_name) conv_lateral.eval() for param in conv_lateral.parameters(): param.requires_grad = False
Prevent all the parameters from being optimized before `self.frozen_stages`.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def build_pathway(cfg: Dict, *args, **kwargs) -> nn.Module: """Build pathway. Args: cfg (dict): cfg should contain: - type (str): identify backbone type. Returns: nn.Module: Created pathway. """ if not (isinstance(cfg, dict) and 'type' in cfg): raise TypeError('cfg must be a dict containing the key "type"') cfg_ = cfg.copy() pathway_type = cfg_.pop('type') if pathway_type not in pathway_cfg: raise KeyError(f'Unrecognized pathway type {pathway_type}') pathway_cls = pathway_cfg[pathway_type] pathway = pathway_cls(*args, **kwargs, **cfg_) return pathway
Build pathway. Args: cfg (dict): cfg should contain: - type (str): identify backbone type. Returns: nn.Module: Created pathway.
build_pathway
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
def forward(self, x: torch.Tensor) -> tuple: """Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: tuple[torch.Tensor]: The feature of the input samples extracted by the backbone. """ x_slow = nn.functional.interpolate( x, mode='nearest', scale_factor=(1.0 / self.resample_rate, 1.0, 1.0)) x_slow = self.slow_path.conv1(x_slow) x_slow = self.slow_path.maxpool(x_slow) x_fast = nn.functional.interpolate( x, mode='nearest', scale_factor=(1.0 / (self.resample_rate // self.speed_ratio), 1.0, 1.0)) x_fast = self.fast_path.conv1(x_fast) x_fast = self.fast_path.maxpool(x_fast) if self.slow_path.lateral: x_fast_lateral = self.slow_path.conv1_lateral(x_fast) x_slow = torch.cat((x_slow, x_fast_lateral), dim=1) for i, layer_name in enumerate(self.slow_path.res_layers): res_layer = getattr(self.slow_path, layer_name) x_slow = res_layer(x_slow) res_layer_fast = getattr(self.fast_path, layer_name) x_fast = res_layer_fast(x_fast) if (i != len(self.slow_path.res_layers) - 1 and self.slow_path.lateral): # No fusion needed in the final stage lateral_name = self.slow_path.lateral_connections[i] conv_lateral = getattr(self.slow_path, lateral_name) x_fast_lateral = conv_lateral(x_fast) x_slow = torch.cat((x_slow, x_fast_lateral), dim=1) out = (x_slow, x_fast) return out
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: tuple[torch.Tensor]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet3d_slowfast.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet3d_slowfast.py
Apache-2.0
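The two pathways subsample frames with nearest interpolation at the top of forward(). A quick demo of that step in isolation, assuming the common defaults resample_rate=8 and speed_ratio=8: a 32-frame clip feeds 4 frames to the slow path and all 32 to the fast path.

import torch
import torch.nn as nn

x = torch.randn(1, 3, 32, 8, 8)                       # (N, C, T, H, W)
x_slow = nn.functional.interpolate(
    x, mode='nearest', scale_factor=(1.0 / 8, 1.0, 1.0))
x_fast = nn.functional.interpolate(
    x, mode='nearest', scale_factor=(1.0 / (8 // 8), 1.0, 1.0))
assert x_slow.shape[2] == 4 and x_fast.shape[2] == 32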
def make_res_layer(block: nn.Module,
                   inplanes: int,
                   planes: int,
                   blocks: int,
                   stride: int = 1,
                   dilation: int = 1,
                   factorize: int = 1,
                   norm_cfg: Optional[ConfigType] = None,
                   with_cp: bool = False) -> nn.Module:
    """Build residual layer for ResNetAudio.

    Args:
        block (nn.Module): Residual module to be built.
        inplanes (int): Number of channels for the input feature
            in each block.
        planes (int): Number of channels for the output feature
            in each block.
        blocks (int): Number of residual blocks.
        stride (int): Strides of residual blocks of each stage.
            Defaults to 1.
        dilation (int): Spacing between kernel elements. Defaults to 1.
        factorize (Union[int, Sequence[int]]): Determine whether to
            factorize for each block. Defaults to 1.
        norm_cfg (Union[dict, ConfigDict], optional): Config for norm
            layers. Defaults to None.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Defaults to False.

    Returns:
        nn.Module: A residual layer for the given config.
    """
    factorize = factorize if not isinstance(
        factorize, int) else (factorize, ) * blocks
    assert len(factorize) == blocks
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = ConvModule(
            inplanes,
            planes * block.expansion,
            kernel_size=1,
            stride=stride,
            bias=False,
            norm_cfg=norm_cfg,
            act_cfg=None)

    layers = []
    layers.append(
        block(
            inplanes,
            planes,
            stride,
            dilation,
            downsample,
            factorize=(factorize[0] == 1),
            norm_cfg=norm_cfg,
            with_cp=with_cp))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(
                inplanes,
                planes,
                1,
                dilation,
                factorize=(factorize[i] == 1),
                norm_cfg=norm_cfg,
                with_cp=with_cp))

    return nn.Sequential(*layers)
Build residual layer for ResNetAudio.

Args:
    block (nn.Module): Residual module to be built.
    inplanes (int): Number of channels for the input feature
        in each block.
    planes (int): Number of channels for the output feature
        in each block.
    blocks (int): Number of residual blocks.
    stride (int): Strides of residual blocks of each stage. Defaults to 1.
    dilation (int): Spacing between kernel elements. Defaults to 1.
    factorize (Union[int, Sequence[int]]): Determine whether to factorize
        for each block. Defaults to 1.
    norm_cfg (Union[dict, ConfigDict], optional): Config for norm layers.
        Defaults to None.
    with_cp (bool): Use checkpoint or not. Using checkpoint will save some
        memory while slowing down the training speed. Defaults to False.

Returns:
    nn.Module: A residual layer for the given config.
make_res_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
def _make_stem_layer(self) -> None:
    """Construct the stem layer, which consists of a ``conv+norm+act``
    module."""
    self.conv1 = ConvModule(
        self.in_channels,
        self.base_channels,
        kernel_size=self.conv1_kernel,
        stride=self.conv1_stride,
        bias=False,
        conv_cfg=dict(type='ConvAudio', op='sum'),
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
Construct the stem layer, which consists of a ``conv+norm+act`` module.
_make_stem_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The feature of the input samples extracted by the backbone. """ x = self.conv1(x) for layer_name in self.res_layers: res_layer = getattr(self, layer_name) x = res_layer(x) return x
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_audio.py
Apache-2.0
def batch_norm(inputs: torch.Tensor, module: nn.modules.batchnorm, training: Optional[bool] = None) -> torch.Tensor: """Applies Batch Normalization for each channel across a batch of data using params from the given batch normalization module. Args: inputs (Tensor): The input data. module (nn.modules.batchnorm): a batch normalization module. Will use params from this batch normalization module to do the operation. training (bool, optional): if true, apply the train mode batch normalization. Defaults to None and will use the training mode of the module. """ if training is None: training = module.training return F.batch_norm( input=inputs, running_mean=None if training else module.running_mean, running_var=None if training else module.running_var, weight=module.weight, bias=module.bias, training=training, momentum=module.momentum, eps=module.eps)
Applies Batch Normalization for each channel across a batch of data using params from the given batch normalization module. Args: inputs (Tensor): The input data. module (nn.modules.batchnorm): a batch normalization module. Will use params from this batch normalization module to do the operation. training (bool, optional): if true, apply the train mode batch normalization. Defaults to None and will use the training mode of the module.
batch_norm
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_omni.py
Apache-2.0
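Example usage of batch_norm with assumed shapes, given the function above and its torch.nn.functional import: statistics come from the batch in training mode and from the module's running estimates in eval mode.

import torch
import torch.nn as nn

bn = nn.BatchNorm3d(8)
x = torch.randn(2, 8, 4, 6, 6)
y_train = batch_norm(x, bn, training=True)    # normalize with batch stats
y_eval = batch_norm(x, bn, training=False)    # normalize with running stats
assert y_train.shape == y_eval.shape == x.shape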
def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call. Accept both 3D (BCTHW for videos) and 2D (BCHW for images) tensors. """ if x.ndim == 4: return self.forward_2d(x) # Forward call for 3D tensors. out = self.conv1(x) out = self.bn1(out).relu_() out = self.conv2(out) out = self.bn2(out).relu_() out = self.conv3(out) out = self.bn3(out) if hasattr(self, 'downsample'): x = self.downsample(x) return out.add_(x).relu_()
Defines the computation performed at every call. Accept both 3D (BCTHW for videos) and 2D (BCHW for images) tensors.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_omni.py
Apache-2.0
def linear_sampler(data, offset):
    """Differentiable Temporal-wise Frame Sampling, which is essentially a
    linear interpolation process.

    It gets the feature map which has been split into several groups,
    shifts them by different offsets according to their groups, and then
    computes the weighted sum along the temporal dimension.

    Args:
        data (torch.Tensor): Split data for certain group in shape
            [N, num_segments, C, H, W].
        offset (torch.Tensor): Data offsets for this group data in shape
            [N, num_segments].
    """
    # [N, num_segments, C, H, W]
    n, t, c, h, w = data.shape

    # offset0, offset1: [N, num_segments]
    offset0 = torch.floor(offset).int()
    offset1 = offset0 + 1

    # data, data0, data1: [N, num_segments, C, H * W]
    data = data.view(n, t, c, h * w).contiguous()

    try:
        from mmcv.ops import tin_shift
    except (ImportError, ModuleNotFoundError):
        raise ImportError('Failed to import `tin_shift` from `mmcv.ops`. '
                          'You will be unable to use TIN. ')

    data0 = tin_shift(data, offset0)
    data1 = tin_shift(data, offset1)

    # weight0, weight1: [N, num_segments]
    weight0 = 1 - (offset - offset0.float())
    weight1 = 1 - weight0

    # weight0, weight1:
    # [N, num_segments] -> [N, num_segments, C // num_segments] -> [N, C]
    group_size = offset.shape[1]
    weight0 = weight0[:, :, None].repeat(1, 1, c // group_size)
    weight0 = weight0.view(weight0.size(0), -1)
    weight1 = weight1[:, :, None].repeat(1, 1, c // group_size)
    weight1 = weight1.view(weight1.size(0), -1)

    # weight0, weight1: [N, C] -> [N, 1, C, 1]
    weight0 = weight0[:, None, :, None]
    weight1 = weight1[:, None, :, None]

    # output: [N, num_segments, C, H * W] -> [N, num_segments, C, H, W]
    output = weight0 * data0 + weight1 * data1
    output = output.view(n, t, c, h, w)

    return output
Differentiable Temporal-wise Frame Sampling, which is essentially a linear
interpolation process.

It gets the feature map which has been split into several groups, shifts
them by different offsets according to their groups, and then computes the
weighted sum along the temporal dimension.

Args:
    data (torch.Tensor): Split data for certain group in shape
        [N, num_segments, C, H, W].
    offset (torch.Tensor): Data offsets for this group data in shape
        [N, num_segments].
linear_sampler
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py
Apache-2.0
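The interpolation weights in isolation, with made-up offsets and no mmcv dependency: a fractional offset of 1.3 blends the frame shifted by 1 (weight 0.7) with the frame shifted by 2 (weight 0.3), which is plain linear interpolation.

import torch

offset = torch.tensor([[1.3, -0.5]])          # (N=1, num_segments=2)
offset0 = torch.floor(offset).int()           # [[1, -1]]
weight0 = 1 - (offset - offset0.float())      # [[0.7, 0.5]]
weight1 = 1 - weight0                         # [[0.3, 0.5]]
assert torch.allclose(weight0 + weight1, torch.ones_like(offset))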
def make_temporal_interlace(self):
    """Make temporal interlace for some layers."""
    num_segment_list = [self.num_segments] * 4
    assert num_segment_list[-1] > 0

    n_round = 1
    if len(list(self.layer3.children())) >= 23:
        print(f'=> Using n_round {n_round} to insert temporal shift.')

    def make_block_interlace(stage, num_segments, shift_div):
        """Apply Deformable shift for a ResNet layer module.

        Args:
            stage (nn.module): A ResNet layer to be deformed.
            num_segments (int): Number of frame segments.
            shift_div (int): Number of division parts for shift.

        Returns:
            nn.Sequential: A Sequential container consisting of deformed
                Interlace blocks.
        """
        blocks = list(stage.children())
        for i, b in enumerate(blocks):
            if i % n_round == 0:
                tds = TemporalInterlace(
                    b.conv1.in_channels,
                    num_segments=num_segments,
                    shift_div=shift_div)
                blocks[i].conv1.conv = CombineNet(tds,
                                                  blocks[i].conv1.conv)
        return nn.Sequential(*blocks)

    self.layer1 = make_block_interlace(self.layer1, num_segment_list[0],
                                       self.shift_div)
    self.layer2 = make_block_interlace(self.layer2, num_segment_list[1],
                                       self.shift_div)
    self.layer3 = make_block_interlace(self.layer3, num_segment_list[2],
                                       self.shift_div)
    self.layer4 = make_block_interlace(self.layer4, num_segment_list[3],
                                       self.shift_div)
Make temporal interlace for some layers.
make_temporal_interlace
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py
Apache-2.0
def make_block_interlace(stage, num_segments, shift_div):
    """Apply Deformable shift for a ResNet layer module.

    Args:
        stage (nn.module): A ResNet layer to be deformed.
        num_segments (int): Number of frame segments.
        shift_div (int): Number of division parts for shift.

    Returns:
        nn.Sequential: A Sequential container consisting of deformed
            Interlace blocks.
    """
    blocks = list(stage.children())
    for i, b in enumerate(blocks):
        if i % n_round == 0:
            tds = TemporalInterlace(
                b.conv1.in_channels,
                num_segments=num_segments,
                shift_div=shift_div)
            blocks[i].conv1.conv = CombineNet(tds, blocks[i].conv1.conv)
    return nn.Sequential(*blocks)
Apply Deformable shift for a ResNet layer module.

Args:
    stage (nn.module): A ResNet layer to be deformed.
    num_segments (int): Number of frame segments.
    shift_div (int): Number of division parts for shift.

Returns:
    nn.Sequential: A Sequential container consisting of deformed
        Interlace blocks.
make_block_interlace
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tin.py
Apache-2.0
def shift(x, num_segments, shift_div=3): """Perform temporal shift operation on the feature. Args: x (torch.Tensor): The input feature to be shifted. num_segments (int): Number of frame segments. shift_div (int): Number of divisions for shift. Default: 3. Returns: torch.Tensor: The shifted feature. """ # [N, C, H, W] n, c, h, w = x.size() # [N // num_segments, num_segments, C, H*W] # can't use 5 dimensional array on PPL2D backend for caffe x = x.view(-1, num_segments, c, h * w) # get shift fold fold = c // shift_div # split c channel into three parts: # left_split, mid_split, right_split left_split = x[:, :, :fold, :] mid_split = x[:, :, fold:2 * fold, :] right_split = x[:, :, 2 * fold:, :] # can't use torch.zeros(*A.shape) or torch.zeros_like(A) # because array on caffe inference must be got by computing # shift left on num_segments channel in `left_split` zeros = left_split - left_split blank = zeros[:, :1, :, :] left_split = left_split[:, 1:, :, :] left_split = torch.cat((left_split, blank), 1) # shift right on num_segments channel in `mid_split` zeros = mid_split - mid_split blank = zeros[:, :1, :, :] mid_split = mid_split[:, :-1, :, :] mid_split = torch.cat((blank, mid_split), 1) # right_split: no shift # concatenate out = torch.cat((left_split, mid_split, right_split), 2) # [N, C, H, W] # restore the original dimension return out.view(n, c, h, w)
Perform temporal shift operation on the feature. Args: x (torch.Tensor): The input feature to be shifted. num_segments (int): Number of frame segments. shift_div (int): Number of divisions for shift. Default: 3. Returns: torch.Tensor: The shifted feature.
shift
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
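A quick check of the shift pattern with assumed sizes: 2 clips of 4 segments, 6 channels, shift_div=3 so fold = 2 channels per part.

import torch

x = torch.randn(8, 6, 2, 2)                   # (N*num_segments, C, H, W)
out = shift(x, num_segments=4, shift_div=3)
xr = x.view(2, 4, 6, 4)
outr = out.view(2, 4, 6, 4)
# first fold shifted left in time, zero-padded at the last segment
assert torch.equal(outr[:, :-1, :2], xr[:, 1:, :2])
assert torch.equal(outr[:, -1, :2], torch.zeros_like(outr[:, -1, :2]))
# second fold shifted right; remaining channels untouched
assert torch.equal(outr[:, 1:, 2:4], xr[:, :-1, 2:4])
assert torch.equal(outr[:, :, 4:], xr[:, :, 4:])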
def make_block_temporal(stage, num_segments): """Make temporal shift on some blocks. Args: stage (nn.Module): Model layers to be shifted. num_segments (int): Number of frame segments. Returns: nn.Module: The shifted blocks. """ blocks = list(stage.children()) for i, b in enumerate(blocks): blocks[i] = TemporalShift( b, num_segments=num_segments, shift_div=self.shift_div) return nn.Sequential(*blocks)
Make temporal shift on some blocks. Args: stage (nn.Module): Model layers to be shifted. num_segments (int): Number of frame segments. Returns: nn.Module: The shifted blocks.
make_block_temporal
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def make_temporal_pool(self):
    """Make temporal pooling between layer1 and layer2, using a 3D max
    pooling layer."""

    class TemporalPool(nn.Module):
        """Temporal pool module.

        Wrap layer2 in ResNet50 with a 3D max pooling layer.

        Args:
            net (nn.Module): Module to make temporal pool.
            num_segments (int): Number of frame segments.
        """

        def __init__(self, net, num_segments):
            super().__init__()
            self.net = net
            self.num_segments = num_segments
            self.max_pool3d = nn.MaxPool3d(
                kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))

        def forward(self, x):
            """Defines the computation performed at every call."""
            # [N, C, H, W]
            n, c, h, w = x.size()
            # [N // num_segments, C, num_segments, H, W]
            x = x.view(n // self.num_segments, self.num_segments, c, h,
                       w).transpose(1, 2)
            # [N // num_segments, C, num_segments // 2, H, W]
            x = self.max_pool3d(x)
            # [N // 2, C, H, W]
            x = x.transpose(1, 2).contiguous().view(n // 2, c, h, w)
            return self.net(x)

    self.layer2 = TemporalPool(self.layer2, self.num_segments)
Make temporal pooling between layer1 and layer2, using a 3D max pooling layer.
make_temporal_pool
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
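The pooling arithmetic in isolation, with assumed sizes: kernel 3 / stride 2 / padding 1 along time halves num_segments from 8 to 4.

import torch
import torch.nn as nn

pool = nn.MaxPool3d(kernel_size=(3, 1, 1), stride=(2, 1, 1),
                    padding=(1, 0, 0))
x = torch.randn(2, 64, 8, 14, 14)        # (batch, C, num_segments, H, W)
assert pool(x).shape == (2, 64, 4, 14, 14)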
def make_non_local(self): """Wrap resnet layer into non local wrapper.""" # This part is for ResNet50 for i in range(self.num_stages): non_local_stage = self.non_local_stages[i] if sum(non_local_stage) == 0: continue layer_name = f'layer{i + 1}' res_layer = getattr(self, layer_name) for idx, non_local in enumerate(non_local_stage): if non_local: res_layer[idx] = NL3DWrapper(res_layer[idx], self.num_segments, self.non_local_cfg)
Wrap resnet layer into non local wrapper.
make_non_local
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def load_original_weights(self, logger):
    """Load weights from the original checkpoint, which requires
    converting keys."""
    state_dict_torchvision = _load_checkpoint(
        self.pretrained, map_location='cpu')
    if 'state_dict' in state_dict_torchvision:
        state_dict_torchvision = state_dict_torchvision['state_dict']

    wrapped_layers_map = dict()
    for name, module in self.named_modules():
        # convert torchvision keys
        ori_name = name
        for wrap_prefix in self._get_wrap_prefix():
            if wrap_prefix in ori_name:
                ori_name = ori_name.replace(wrap_prefix, '')
                wrapped_layers_map[ori_name] = name

        if isinstance(module, ConvModule):
            if 'downsample' in ori_name:
                # layer{X}.{Y}.downsample.conv->layer{X}.{Y}.downsample.0
                tv_conv_name = ori_name + '.0'
                # layer{X}.{Y}.downsample.bn->layer{X}.{Y}.downsample.1
                tv_bn_name = ori_name + '.1'
            else:
                # layer{X}.{Y}.conv{n}.conv->layer{X}.{Y}.conv{n}
                tv_conv_name = ori_name
                # layer{X}.{Y}.conv{n}.bn->layer{X}.{Y}.bn{n}
                tv_bn_name = ori_name.replace('conv', 'bn')

            for conv_param in ['.weight', '.bias']:
                if tv_conv_name + conv_param in state_dict_torchvision:
                    state_dict_torchvision[ori_name+'.conv'+conv_param] = \
                        state_dict_torchvision.pop(tv_conv_name+conv_param)

            for bn_param in [
                    '.weight', '.bias', '.running_mean', '.running_var'
            ]:
                if tv_bn_name + bn_param in state_dict_torchvision:
                    state_dict_torchvision[ori_name+'.bn'+bn_param] = \
                        state_dict_torchvision.pop(tv_bn_name+bn_param)

    # convert wrapped keys
    for param_name in list(state_dict_torchvision.keys()):
        layer_name = '.'.join(param_name.split('.')[:-1])
        if layer_name in wrapped_layers_map:
            wrapped_name = param_name.replace(
                layer_name, wrapped_layers_map[layer_name])
            print(f'wrapped_name {wrapped_name}')
            state_dict_torchvision[
                wrapped_name] = state_dict_torchvision.pop(param_name)

    msg = self.load_state_dict(state_dict_torchvision, strict=False)
    logger.info(msg)
Load weights from the original checkpoint, which requires converting keys.
load_original_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/resnet_tsm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/resnet_tsm.py
Apache-2.0
def forward(self, imgs: torch.Tensor, heatmap_imgs: torch.Tensor) -> tuple: """Defines the computation performed at every call. Args: imgs (torch.Tensor): The input data. heatmap_imgs (torch.Tensor): The input data. Returns: tuple[torch.Tensor]: The feature of the input samples extracted by the backbone. """ if self.training: rgb_drop_path = torch.rand(1) < self.rgb_drop_path pose_drop_path = torch.rand(1) < self.pose_drop_path else: rgb_drop_path, pose_drop_path = False, False # We assume base_channel for RGB and Pose are 64 and 32. x_rgb = self.rgb_path.conv1(imgs) x_rgb = self.rgb_path.maxpool(x_rgb) # N x 64 x 8 x 56 x 56 x_pose = self.pose_path.conv1(heatmap_imgs) x_pose = self.pose_path.maxpool(x_pose) x_rgb = self.rgb_path.layer1(x_rgb) x_rgb = self.rgb_path.layer2(x_rgb) x_pose = self.pose_path.layer1(x_pose) if hasattr(self.rgb_path, 'layer2_lateral'): feat = x_pose.detach() if self.rgb_detach else x_pose x_pose_lateral = self.rgb_path.layer2_lateral(feat) if rgb_drop_path: x_pose_lateral = x_pose_lateral.new_zeros(x_pose_lateral.shape) if hasattr(self.pose_path, 'layer1_lateral'): feat = x_rgb.detach() if self.pose_detach else x_rgb x_rgb_lateral = self.pose_path.layer1_lateral(feat) if pose_drop_path: x_rgb_lateral = x_rgb_lateral.new_zeros(x_rgb_lateral.shape) if hasattr(self.rgb_path, 'layer2_lateral'): x_rgb = torch.cat((x_rgb, x_pose_lateral), dim=1) if hasattr(self.pose_path, 'layer1_lateral'): x_pose = torch.cat((x_pose, x_rgb_lateral), dim=1) x_rgb = self.rgb_path.layer3(x_rgb) x_pose = self.pose_path.layer2(x_pose) if hasattr(self.rgb_path, 'layer3_lateral'): feat = x_pose.detach() if self.rgb_detach else x_pose x_pose_lateral = self.rgb_path.layer3_lateral(feat) if rgb_drop_path: x_pose_lateral = x_pose_lateral.new_zeros(x_pose_lateral.shape) if hasattr(self.pose_path, 'layer2_lateral'): feat = x_rgb.detach() if self.pose_detach else x_rgb x_rgb_lateral = self.pose_path.layer2_lateral(feat) if pose_drop_path: x_rgb_lateral = x_rgb_lateral.new_zeros(x_rgb_lateral.shape) if hasattr(self.rgb_path, 'layer3_lateral'): x_rgb = torch.cat((x_rgb, x_pose_lateral), dim=1) if hasattr(self.pose_path, 'layer2_lateral'): x_pose = torch.cat((x_pose, x_rgb_lateral), dim=1) x_rgb = self.rgb_path.layer4(x_rgb) x_pose = self.pose_path.layer3(x_pose) return x_rgb, x_pose
Defines the computation performed at every call. Args: imgs (torch.Tensor): The input data. heatmap_imgs (torch.Tensor): The input data. Returns: tuple[torch.Tensor]: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/rgbposeconv3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/rgbposeconv3d.py
Apache-2.0
def window_partition(x: torch.Tensor, window_size: Sequence[int]) -> torch.Tensor: """ Args: x (torch.Tensor): The input features of shape :math:`(B, D, H, W, C)`. window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`. Returns: torch.Tensor: The partitioned windows of shape :math:`(B*num_windows, w_d*w_h*w_w, C)`. """ B, D, H, W, C = x.shape x = x.view(B, D // window_size[0], window_size[0], H // window_size[1], window_size[1], W // window_size[2], window_size[2], C) windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, reduce(mul, window_size), C) return windows
Args: x (torch.Tensor): The input features of shape :math:`(B, D, H, W, C)`. window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`. Returns: torch.Tensor: The partitioned windows of shape :math:`(B*num_windows, w_d*w_h*w_w, C)`.
window_partition
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def window_reverse(windows: torch.Tensor, window_size: Sequence[int], B: int,
                   D: int, H: int, W: int) -> torch.Tensor:
    """
    Args:
        windows (torch.Tensor): Input windows of shape
            :math:`(B*num_windows, w_d, w_h, w_w, C)`.
        window_size (Sequence[int]): The window size, :math:`(w_d, w_h,
            w_w)`.
        B (int): Batch size of feature maps.
        D (int): Temporal length of feature maps.
        H (int): Height of feature maps.
        W (int): Width of feature maps.

    Returns:
        torch.Tensor: The feature maps reversed from windows of shape
            :math:`(B, D, H, W, C)`.
    """
    x = windows.view(B, D // window_size[0], H // window_size[1],
                     W // window_size[2], window_size[0], window_size[1],
                     window_size[2], -1)
    x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(B, D, H, W, -1)
    return x
Args:
    windows (torch.Tensor): Input windows of shape
        :math:`(B*num_windows, w_d, w_h, w_w, C)`.
    window_size (Sequence[int]): The window size, :math:`(w_d, w_h, w_w)`.
    B (int): Batch size of feature maps.
    D (int): Temporal length of feature maps.
    H (int): Height of feature maps.
    W (int): Width of feature maps.

Returns:
    torch.Tensor: The feature maps reversed from windows of shape
        :math:`(B, D, H, W, C)`.
window_reverse
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
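A round-trip check: window_partition followed by window_reverse recovers the input. It assumes the two functions above (and the functools/operator imports used by window_partition) are in scope, and sizes that are multiples of the window size.

import torch

B, D, H, W, C = 2, 4, 8, 8, 16
window_size = (2, 4, 4)
x = torch.randn(B, D, H, W, C)
windows = window_partition(x, window_size)    # (B*num_windows, 32, C)
assert windows.shape == (B * 2 * 2 * 2, 2 * 4 * 4, C)
x2 = window_reverse(windows, window_size, B, D, H, W)
assert torch.equal(x, x2)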
def get_window_size( x_size: Sequence[int], window_size: Sequence[int], shift_size: Optional[Sequence[int]] = None ) -> Union[Tuple[int], Tuple[Tuple[int]]]: """Calculate window size and shift size according to the input size. Args: x_size (Sequence[int]): The input size. window_size (Sequence[int]): The expected window size. shift_size (Sequence[int], optional): The expected shift size. Defaults to None. Returns: tuple: The calculated window size and shift size. """ use_window_size = list(window_size) if shift_size is not None: use_shift_size = list(shift_size) for i in range(len(x_size)): if x_size[i] <= window_size[i]: use_window_size[i] = x_size[i] if shift_size is not None: use_shift_size[i] = 0 if shift_size is None: return tuple(use_window_size) else: return tuple(use_window_size), tuple(use_shift_size)
Calculate window size and shift size according to the input size. Args: x_size (Sequence[int]): The input size. window_size (Sequence[int]): The expected window size. shift_size (Sequence[int], optional): The expected shift size. Defaults to None. Returns: tuple: The calculated window size and shift size.
get_window_size
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
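A quick demo of get_window_size with made-up sizes: when an input dimension is no larger than the window, the window shrinks to that dimension and the corresponding shift is disabled.

ws, ss = get_window_size((4, 7, 14), window_size=(8, 7, 7),
                         shift_size=(4, 3, 3))
assert ws == (4, 7, 7) and ss == (0, 0, 3)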
def compute_mask(D: int, H: int, W: int, window_size: Sequence[int], shift_size: Sequence[int], device: Union[str, torch.device]) -> torch.Tensor: """Compute attention mask. Args: D (int): Temporal length of feature maps. H (int): Height of feature maps. W (int): Width of feature maps. window_size (Sequence[int]): The window size. shift_size (Sequence[int]): The shift size. device (str or :obj:`torch.device`): The device of the mask. Returns: torch.Tensor: The attention mask used for shifted window attention. """ img_mask = torch.zeros((1, D, H, W, 1), device=device) # 1 Dp Hp Wp 1 cnt = 0 for d in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice( -shift_size[0], None): for h in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice( -shift_size[1], None): for w in slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice( -shift_size[2], None): img_mask[:, d, h, w, :] = cnt cnt += 1 mask_windows = window_partition(img_mask, window_size) # nW, ws[0]*ws[1]*ws[2], 1 mask_windows = mask_windows.squeeze(-1) # nW, ws[0]*ws[1]*ws[2] attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( attn_mask == 0, float(0.0)) return attn_mask
Compute attention mask. Args: D (int): Temporal length of feature maps. H (int): Height of feature maps. W (int): Width of feature maps. window_size (Sequence[int]): The window size. shift_size (Sequence[int]): The shift size. device (str or :obj:`torch.device`): The device of the mask. Returns: torch.Tensor: The attention mask used for shifted window attention.
compute_mask
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
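A shape sketch with assumed sizes (it relies on the module's `window_partition` helper, which `compute_mask` calls internally): with padded sizes Dp=4, Hp=8, Wp=8, window (2, 4, 4) and shift (1, 2, 2), there are (4/2) * (8/4) * (8/4) = 8 windows of 2*4*4 = 32 tokens each.

mask = compute_mask(4, 8, 8, (2, 4, 4), (1, 2, 2), device='cpu')
print(mask.shape)  # torch.Size([8, 32, 32]); entries are 0.0 or -100.0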
def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor: """Forward function. Args: x (torch.Tensor): Input feature maps of shape :math:`(B*num_windows, N, C)`. mask (torch.Tensor, optional): (0/-inf) mask of shape :math:`(num_windows, N, N)`. Defaults to None. """ B_, N, C = x.shape qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # B_, nH, N, C q = q * self.scale attn = q @ k.transpose(-2, -1) relative_position_bias = self.relative_position_bias_table[ self.relative_position_index[:N, :N].reshape(-1)].reshape( N, N, -1) # Wd*Wh*Ww,Wd*Wh*Ww,nH relative_position_bias = relative_position_bias.permute( 2, 0, 1).contiguous() # nH, Wd*Wh*Ww, Wd*Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) # B_, nH, N, N if mask is not None: nW = mask.shape[0] attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return x
Forward function. Args: x (torch.Tensor): Input feature maps of shape :math:`(B*num_windows, N, C)`. mask (torch.Tensor, optional): (0/-inf) mask of shape :math:`(num_windows, N, N)`. Defaults to None.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward(self, x: torch.Tensor, mask_matrix: torch.Tensor) -> torch.Tensor: """ Args: x (torch.Tensor): Input features of shape :math:`(B, D, H, W, C)`. mask_matrix (torch.Tensor): Attention mask for cyclic shift. """ shortcut = x if self.with_cp: x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix) else: x = self.forward_part1(x, mask_matrix) x = shortcut + self.drop_path(x) if self.with_cp: x = x + checkpoint.checkpoint(self.forward_part2, x) else: x = x + self.forward_part2(x) return x
Args: x (torch.Tensor): Input features of shape :math:`(B, D, H, W, C)`. mask_matrix (torch.Tensor): Attention mask for cyclic shift.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """Perform patch merging. Args: x (torch.Tensor): Input feature maps of shape :math:`(B, D, H, W, C)`. Returns: torch.Tensor: The merged feature maps of shape :math:`(B, D, H/2, W/2, 2*C)`. """ B, D, H, W, C = x.shape # padding pad_input = (H % 2 == 1) or (W % 2 == 1) if pad_input: x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) x0 = x[:, :, 0::2, 0::2, :] # B D H/2 W/2 C x1 = x[:, :, 1::2, 0::2, :] # B D H/2 W/2 C x2 = x[:, :, 0::2, 1::2, :] # B D H/2 W/2 C x3 = x[:, :, 1::2, 1::2, :] # B D H/2 W/2 C x = torch.cat([x0, x1, x2, x3], -1) # B D H/2 W/2 4*C x = self.norm(x) x = self.reduction(x) return x
Perform patch merging. Args: x (torch.Tensor): Input feature maps of shape :math:`(B, D, H, W, C)`. Returns: torch.Tensor: The merged feature maps of shape :math:`(B, D, H/2, W/2, 2*C)`.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
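A standalone sketch of the 2x2 spatial gather that precedes the learned reduction (sizes are illustrative): four interleaved sub-grids are concatenated on the channel axis, after which the norm and the Linear(4*C -> 2*C) reduction map (B, D, H, W, C) to (B, D, H/2, W/2, 2*C).

import torch
x = torch.randn(2, 4, 8, 8, 96)
merged = torch.cat([x[:, :, 0::2, 0::2, :], x[:, :, 1::2, 0::2, :],
                    x[:, :, 0::2, 1::2, :], x[:, :, 1::2, 1::2, :]], -1)
print(merged.shape)  # torch.Size([2, 4, 4, 4, 384]) -- i.e. (B, D, H/2, W/2, 4*C)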
def forward(self, x: torch.Tensor, do_downsample: bool = True) -> torch.Tensor: """Forward function. Args: x (torch.Tensor): Input feature maps of shape :math:`(B, C, D, H, W)`. do_downsample (bool): Whether to downsample the output of the current layer. Defaults to True. """ # calculate attention mask for SW-MSA B, C, D, H, W = x.shape window_size, shift_size = get_window_size((D, H, W), self.window_size, self.shift_size) x = rearrange(x, 'b c d h w -> b d h w c') Dp = int(np.ceil(D / window_size[0])) * window_size[0] Hp = int(np.ceil(H / window_size[1])) * window_size[1] Wp = int(np.ceil(W / window_size[2])) * window_size[2] attn_mask = compute_mask(Dp, Hp, Wp, window_size, shift_size, x.device) for blk in self.blocks: x = blk(x, attn_mask) if self.downsample is not None and do_downsample: x = self.downsample(x) return x
Forward function. Args: x (torch.Tensor): Input feature maps of shape :math:`(B, C, D, H, W)`. do_downsample (bool): Whether to downsample the output of the current layer. Defaults to True.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
def forward(self, x: torch.Tensor) -> torch.Tensor: """Perform video to patch embedding. Args: x (torch.Tensor): The input videos of shape :math:`(B, C, D, H, W)`. In most cases, C is 3. Returns: torch.Tensor: The video patches of shape :math:`(B, embed_dims, Dp, Hp, Wp)`. """ _, _, D, H, W = x.size() if W % self.patch_size[2] != 0: x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2])) if H % self.patch_size[1] != 0: x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1])) if D % self.patch_size[0] != 0: x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self.patch_size[0])) x = self.proj(x) # B C Dp Wp Wp if self.norm is not None: Dp, Hp, Wp = x.size(2), x.size(3), x.size(4) x = x.flatten(2).transpose(1, 2) # B Dp*Hp*Wp C x = self.norm(x) x = x.transpose(1, 2).view(-1, self.embed_dims, Dp, Hp, Wp) return x
Perform video to patch embedding. Args: x (torch.Tensor): The input videos of shape :math:`(B, C, D, H, W)`. In most cases, C is 3. Returns: torch.Tensor: The video patches of shape :math:`(B, embed_dims, Dp, Hp, Wp)`.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
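A padding-arithmetic sketch with hypothetical sizes: with patch_size (2, 4, 4) and an input of (B, 3, 5, 30, 30), each axis is padded up to the next multiple of its patch size before the Conv3d projection (D: 5 -> 6, H: 30 -> 32, W: 30 -> 32), so the patch grid is just a ceiling division.

import math
patch_size, in_shape = (2, 4, 4), (5, 30, 30)
grid = tuple(math.ceil(s / p) for s, p in zip(in_shape, patch_size))
print(grid)  # (3, 8, 8) == (Dp, Hp, Wp)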
def inflate_weights(self, logger: MMLogger) -> None: """Inflate the swin2d parameters to swin3d. The differences between swin3d and swin2d mainly lie in the extra temporal axis. To utilize the pretrained parameters of the 2d model, the weights of swin2d models should be inflated to fit the shapes of their 3d counterparts. Args: logger (MMLogger): The logger used to print debugging information. """ checkpoint = _load_checkpoint(self.pretrained, map_location='cpu') state_dict = checkpoint['model'] # delete relative_position_index since we always re-init it relative_position_index_keys = [ k for k in state_dict.keys() if 'relative_position_index' in k ] for k in relative_position_index_keys: del state_dict[k] # delete attn_mask since we always re-init it attn_mask_keys = [k for k in state_dict.keys() if 'attn_mask' in k] for k in attn_mask_keys: del state_dict[k] state_dict['patch_embed.proj.weight'] = \ state_dict['patch_embed.proj.weight'].unsqueeze(2).\ repeat(1, 1, self.patch_size[0], 1, 1) / self.patch_size[0] # bicubic interpolate relative_position_bias_table if not match relative_position_bias_table_keys = [ k for k in state_dict.keys() if 'relative_position_bias_table' in k ] for k in relative_position_bias_table_keys: relative_position_bias_table_pretrained = state_dict[k] relative_position_bias_table_current = self.state_dict()[k] L1, nH1 = relative_position_bias_table_pretrained.size() L2, nH2 = relative_position_bias_table_current.size() L2 = (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1) wd = self.window_size[0] if nH1 != nH2: logger.warning(f'Error in loading {k}, passing') else: if L1 != L2: S1 = int(L1**0.5) relative_position_bias_table_pretrained_resized = \ torch.nn.functional.interpolate( relative_position_bias_table_pretrained.permute( 1, 0).view(1, nH1, S1, S1), size=(2 * self.window_size[1] - 1, 2 * self.window_size[2] - 1), mode='bicubic') relative_position_bias_table_pretrained = \ relative_position_bias_table_pretrained_resized. \ view(nH2, L2).permute(1, 0) state_dict[k] = relative_position_bias_table_pretrained.repeat( 2 * wd - 1, 1) # In the original swin2d checkpoint, the last layer of the # backbone is the norm layer, and the original attribute # name is `norm`. We changed it to `norm3` which means it # is the last norm layer of stage 4. if hasattr(self, 'norm3'): state_dict['norm3.weight'] = state_dict['norm.weight'] state_dict['norm3.bias'] = state_dict['norm.bias'] del state_dict['norm.weight'] del state_dict['norm.bias'] msg = self.load_state_dict(state_dict, strict=False) logger.info(msg)
Inflate the swin2d parameters to swin3d. The differences between swin3d and swin2d mainly lie in the extra temporal axis. To utilize the pretrained parameters of the 2d model, the weights of swin2d models should be inflated to fit the shapes of their 3d counterparts. Args: logger (MMLogger): The logger used to print debugging information.
inflate_weights
python
open-mmlab/mmaction2
mmaction/models/backbones/swin.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/swin.py
Apache-2.0
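A minimal sketch of the "repeat and rescale" rule applied to `patch_embed.proj.weight` above (tensor sizes are illustrative): the 2D kernel is copied along the new temporal axis and divided by its length, so a temporally constant input yields the same activation the 2D convolution produced.

import torch
w2d = torch.randn(96, 3, 4, 4)                    # (out, in, kH, kW)
t = 2                                             # temporal patch size
w3d = w2d.unsqueeze(2).repeat(1, 1, t, 1, 1) / t  # (out, in, t, kH, kW)
assert torch.allclose(w3d.sum(dim=2), w2d)        # responses are preserved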
def make_tam_modeling(self): """Replace ResNet-Block with TA-Block.""" def make_tam_block(stage, num_segments, tam_cfg=dict()): blocks = list(stage.children()) for i, block in enumerate(blocks): blocks[i] = TABlock(block, num_segments, deepcopy(tam_cfg)) return nn.Sequential(*blocks) for i in range(self.num_stages): layer_name = f'layer{i + 1}' res_layer = getattr(self, layer_name) setattr(self, layer_name, make_tam_block(res_layer, self.num_segments, self.tam_cfg))
Replace ResNet-Block with TA-Block.
make_tam_modeling
python
open-mmlab/mmaction2
mmaction/models/backbones/tanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/tanet.py
Apache-2.0
def conv_3xnxn(inp: int, oup: int, kernel_size: int = 3, stride: int = 3, groups: int = 1): """3D convolution with kernel size of 3xnxn. Args: inp (int): Dimension of input features. oup (int): Dimension of output features. kernel_size (int): The spatial kernel size (i.e., n). Defaults to 3. stride (int): The spatial stride. Defaults to 3. groups (int): Group number of operated features. Defaults to 1. """ return nn.Conv3d( inp, oup, (3, kernel_size, kernel_size), (2, stride, stride), (1, 0, 0), groups=groups)
3D convolution with kernel size of 3xnxn. Args: inp (int): Dimension of input features. oup (int): Dimension of output features. kernel_size (int): The spatial kernel size (i.e., n). Defaults to 3. stride (int): The spatial stride. Defaults to 3. groups (int): Group number of operated features. Defaults to 1.
conv_3xnxn
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
def conv_1xnxn(inp: int, oup: int, kernel_size: int = 3, stride: int = 3, groups: int = 1): """3D convolution with kernel size of 1xnxn. Args: inp (int): Dimension of input features. oup (int): Dimension of output features. kernel_size (int): The spatial kernel size (i.e., n). Defaults to 3. stride (int): The spatial stride. Defaults to 3. groups (int): Group number of operated features. Defaults to 1. """ return nn.Conv3d( inp, oup, (1, kernel_size, kernel_size), (1, stride, stride), (0, 0, 0), groups=groups)
3D convolution with kernel size of 1xnxn. Args: inp (int): Dimension of input features. oup (int): Dimension of output features. kernel_size (int): The spatial kernel size (i.e., n). Defaults to 3. stride (int): The spatial stride. Defaults to 3. groups (int): Group number of operated features. Defaults to 1.
conv_1xnxn
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
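A shape sketch for the two helpers (illustrative sizes): `conv_3xnxn` halves the temporal axis through its hard-coded temporal kernel 3, stride 2 and padding 1, while `conv_1xnxn` leaves time untouched.

import torch
x = torch.randn(1, 3, 8, 56, 56)
y = conv_3xnxn(3, 64, kernel_size=4, stride=4)(x)
print(y.shape)  # torch.Size([1, 64, 4, 14, 14]) -- time 8 -> 4
z = conv_1xnxn(64, 64, kernel_size=3, stride=2)(y)
print(z.shape)  # torch.Size([1, 64, 4, 6, 6]) -- time preserved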
def _load_pretrained(self, pretrained: str = None) -> None: """Load ImageNet-1K pretrained model. The model is pretrained with ImageNet-1K. https://github.com/Sense-X/UniFormer Args: pretrained (str): Model name of ImageNet-1K pretrained model. Defaults to None. """ if pretrained is not None: model_path = _MODELS[pretrained] logger.info(f'Load ImageNet pretrained model from {model_path}') state_dict = _load_checkpoint(model_path, map_location='cpu') state_dict_3d = self.state_dict() for k in state_dict.keys(): if k in state_dict_3d.keys( ) and state_dict[k].shape != state_dict_3d[k].shape: if len(state_dict_3d[k].shape) <= 2: logger.info(f'Ignore: {k}') continue logger.info(f'Inflate: {k}, {state_dict[k].shape}' + f' => {state_dict_3d[k].shape}') time_dim = state_dict_3d[k].shape[2] state_dict[k] = self._inflate_weight( state_dict[k], time_dim) self.load_state_dict(state_dict, strict=False)
Load ImageNet-1K pretrained model. The model is pretrained with ImageNet-1K. https://github.com/Sense-X/UniFormer Args: pretrained (str): Model name of ImageNet-1K pretrained model. Defaults to None.
_load_pretrained
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformer.py
Apache-2.0
def _load_pretrained(self, pretrained: str = None) -> None: """Load CLIP pretrained visual encoder. The visual encoder is extracted from CLIP. https://github.com/openai/CLIP Args: pretrained (str): Model name of pretrained CLIP visual encoder. Defaults to None. """ assert pretrained is not None, \ 'please specify the CLIP pretrained checkpoint' model_path = _MODELS[pretrained] logger.info(f'Load CLIP pretrained model from {model_path}') state_dict = _load_checkpoint(model_path, map_location='cpu') state_dict_3d = self.state_dict() for k in state_dict.keys(): if k in state_dict_3d.keys( ) and state_dict[k].shape != state_dict_3d[k].shape: if len(state_dict_3d[k].shape) <= 2: logger.info(f'Ignore: {k}') continue logger.info(f'Inflate: {k}, {state_dict[k].shape}' + f' => {state_dict_3d[k].shape}') time_dim = state_dict_3d[k].shape[2] state_dict[k] = self._inflate_weight(state_dict[k], time_dim) self.load_state_dict(state_dict, strict=False)
Load CLIP pretrained visual encoder. The visual encoder is extracted from CLIP. https://github.com/openai/CLIP Args: pretrained (str): Model name of pretrained CLIP visual encoder. Defaults to None.
_load_pretrained
python
open-mmlab/mmaction2
mmaction/models/backbones/uniformerv2.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/uniformerv2.py
Apache-2.0
def forward(self, x: Tensor) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data with size of (B, N, C). Returns: Tensor: The output of the attention block, same size as inputs. """ B, N, C = x.shape if hasattr(self, 'q_bias'): k_bias = torch.zeros_like(self.v_bias, requires_grad=False) qkv_bias = torch.cat((self.q_bias, k_bias, self.v_bias)) qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) else: qkv = self.qkv(x) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) return x
Defines the computation performed at every call. Args: x (Tensor): The input data with size of (B, N, C). Returns: Tensor: The output of the attention block, same size as inputs.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/vit_mae.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py
Apache-2.0
def forward(self, x: Tensor) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data with size of (B, N, C). Returns: Tensor: The output of the transformer block, same size as inputs. """ if hasattr(self, 'gamma_1'): x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) else: x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x
Defines the computation performed at every call. Args: x (Tensor): The input data with size of (B, N, C). Returns: Tensor: The output of the transformer block, same size as inputs.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/vit_mae.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py
Apache-2.0
def get_sinusoid_encoding(n_position: int, embed_dims: int) -> Tensor: """Generate sinusoid encoding table. Sinusoid encoding is a kind of relative position encoding method that comes from `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_. Args: n_position (int): The length of the input token. embed_dims (int): The position embedding dimension. Returns: :obj:`torch.FloatTensor`: The sinusoid encoding table of size (1, n_position, embed_dims) """ vec = torch.arange(embed_dims, dtype=torch.float64) vec = (vec - vec % 2) / embed_dims vec = torch.pow(10000, -vec).view(1, -1) sinusoid_table = torch.arange(n_position).view(-1, 1) * vec sinusoid_table[:, 0::2].sin_() # dim 2i sinusoid_table[:, 1::2].cos_() # dim 2i+1 sinusoid_table = sinusoid_table.to(torch.float32) return sinusoid_table.unsqueeze(0)
Generate sinusoid encoding table. Sinusoid encoding is a kind of relative position encoding method that comes from `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_. Args: n_position (int): The length of the input token. embed_dims (int): The position embedding dimension. Returns: :obj:`torch.FloatTensor`: The sinusoid encoding table of size (1, n_position, embed_dims)
get_sinusoid_encoding
python
open-mmlab/mmaction2
mmaction/models/backbones/vit_mae.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py
Apache-2.0
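A spot-check sketch against the textbook formula PE(pos, 2i) = sin(pos / 10000^(2i/d)) and PE(pos, 2i+1) = cos(pos / 10000^(2i/d)), which the table above implements:

import math
table = get_sinusoid_encoding(n_position=16, embed_dims=8)[0]
pos, i, d = 5, 1, 8
assert abs(table[pos, 2 * i].item() - math.sin(pos / 10000 ** (2 * i / d))) < 1e-5
assert abs(table[pos, 2 * i + 1].item() - math.cos(pos / 10000 ** (2 * i / d))) < 1e-5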
def forward(self, x: Tensor) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The feature of the input samples extracted by the backbone. """ b, _, _, h, w = x.shape h //= self.patch_size w //= self.patch_size x = self.patch_embed(x)[0] if (h, w) != self.grid_size: pos_embed = self.pos_embed.reshape(-1, *self.grid_size, self.embed_dims) pos_embed = pos_embed.permute(0, 3, 1, 2) pos_embed = F.interpolate( pos_embed, size=(h, w), mode='bicubic', align_corners=False) pos_embed = pos_embed.permute(0, 2, 3, 1).flatten(1, 2) pos_embed = pos_embed.reshape(1, -1, self.embed_dims) else: pos_embed = self.pos_embed x = x + pos_embed x = self.pos_drop(x) for blk in self.blocks: x = blk(x) x = self.norm(x) if self.return_feat_map: x = x.reshape(b, -1, h, w, self.embed_dims) x = x.permute(0, 4, 1, 2, 3) return x if self.fc_norm is not None: return self.fc_norm(x.mean(1)) return x[:, 0]
Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The feature of the input samples extracted by the backbone.
forward
python
open-mmlab/mmaction2
mmaction/models/backbones/vit_mae.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/vit_mae.py
Apache-2.0
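A standalone sketch of the positional-embedding resize performed above when the inference grid differs from the training grid (sizes are illustrative): the (1, gh*gw, C) table is reshaped to an image-like layout, bicubically resampled, and flattened back.

import torch
import torch.nn.functional as F
embed_dims, old_grid, new_grid = 768, (14, 14), (16, 16)
pos_embed = torch.randn(1, old_grid[0] * old_grid[1], embed_dims)
pe = pos_embed.reshape(1, *old_grid, embed_dims).permute(0, 3, 1, 2)
pe = F.interpolate(pe, size=new_grid, mode='bicubic', align_corners=False)
pe = pe.permute(0, 2, 3, 1).reshape(1, -1, embed_dims)
print(pe.shape)  # torch.Size([1, 256, 768])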
def make_res_layer(self, block, layer_inplanes, inplanes, planes, blocks, spatial_stride=1, se_style='half', se_ratio=None, use_swish=True, norm_cfg=None, act_cfg=None, conv_cfg=None, with_cp=False, **kwargs): """Build residual layer for ResNet3D. Args: block (nn.Module): Residual module to be built. layer_inplanes (int): Number of channels for the input feature of the res layer. inplanes (int): Number of channels for the input feature in each block, which equals base_channels * gamma_w. planes (int): Number of channels for the output feature in each block, which equals base_channels * gamma_w * gamma_b. blocks (int): Number of residual blocks. spatial_stride (int): Spatial strides in residual and conv layers. Default: 1. se_style (str): The style of inserting SE modules into BlockX3D, 'half' denotes insert into half of the blocks, while 'all' denotes insert into all blocks. Default: 'half'. se_ratio (float | None): The reduction ratio of the squeeze and excitation unit. If set as None, the SE unit is not used. Default: None. use_swish (bool): Whether to use swish as the activation function before and after the 3x3x3 conv. Default: True. conv_cfg (dict | None): Config for conv layers. Default: None. norm_cfg (dict | None): Config for norm layers. Default: None. act_cfg (dict | None): Config for activation layers. Default: None. with_cp (bool | None): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: nn.Module: A residual layer for the given config. """ downsample = None if spatial_stride != 1 or layer_inplanes != inplanes: downsample = ConvModule( layer_inplanes, inplanes, kernel_size=1, stride=(1, spatial_stride, spatial_stride), padding=0, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) use_se = [False] * blocks if self.se_style == 'all': use_se = [True] * blocks elif self.se_style == 'half': use_se = [i % 2 == 0 for i in range(blocks)] else: raise NotImplementedError layers = [] layers.append( block( layer_inplanes, planes, inplanes, spatial_stride=spatial_stride, downsample=downsample, se_ratio=se_ratio if use_se[0] else None, use_swish=use_swish, norm_cfg=norm_cfg, conv_cfg=conv_cfg, act_cfg=act_cfg, with_cp=with_cp, **kwargs)) for i in range(1, blocks): layers.append( block( inplanes, planes, inplanes, spatial_stride=1, se_ratio=se_ratio if use_se[i] else None, use_swish=use_swish, norm_cfg=norm_cfg, conv_cfg=conv_cfg, act_cfg=act_cfg, with_cp=with_cp, **kwargs)) return nn.Sequential(*layers)
Build residual layer for ResNet3D. Args: block (nn.Module): Residual module to be built. layer_inplanes (int): Number of channels for the input feature of the res layer. inplanes (int): Number of channels for the input feature in each block, which equals base_channels * gamma_w. planes (int): Number of channels for the output feature in each block, which equals base_channels * gamma_w * gamma_b. blocks (int): Number of residual blocks. spatial_stride (int): Spatial strides in residual and conv layers. Default: 1. se_style (str): The style of inserting SE modules into BlockX3D, 'half' denotes insert into half of the blocks, while 'all' denotes insert into all blocks. Default: 'half'. se_ratio (float | None): The reduction ratio of the squeeze and excitation unit. If set as None, the SE unit is not used. Default: None. use_swish (bool): Whether to use swish as the activation function before and after the 3x3x3 conv. Default: True. conv_cfg (dict | None): Config for conv layers. Default: None. norm_cfg (dict | None): Config for norm layers. Default: None. act_cfg (dict | None): Config for activation layers. Default: None. with_cp (bool | None): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: nn.Module: A residual layer for the given config.
make_res_layer
python
open-mmlab/mmaction2
mmaction/models/backbones/x3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/backbones/x3d.py
Apache-2.0
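A small sketch of the 'half' SE placement used above: squeeze-and-excitation is inserted into every other block, starting with the first.

blocks = 5
print([i % 2 == 0 for i in range(blocks)])  # [True, False, True, False, True]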
def aggregate_stats(self): """Synchronize running_mean and running_var to self.bn. Call this before evaluation, then call model.eval(); in eval mode, the forward function uses self.bn instead of self.split_bn, and by that point the running_mean and running_var of self.bn have been aggregated from self.split_bn. """ if self.split_bn.track_running_stats: aggre_func = self._get_aggregated_mean_std self.bn.running_mean.data, self.bn.running_var.data = aggre_func( self.split_bn.running_mean, self.split_bn.running_var, self.num_splits) self.bn.num_batches_tracked = self.split_bn.num_batches_tracked.detach( )
Synchronize running_mean and running_var to self.bn. Call this before evaluation, then call model.eval(); in eval mode, the forward function uses self.bn instead of self.split_bn, and by that point the running_mean and running_var of self.bn have been aggregated from self.split_bn.
aggregate_stats
python
open-mmlab/mmaction2
mmaction/models/common/sub_batchnorm3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/common/sub_batchnorm3d.py
Apache-2.0
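A hedged sketch of the aggregation math (the helper `_get_aggregated_mean_std` itself is not shown in this record, so this follows the standard law of total variance): the pooled mean is the mean of the per-split means, and the pooled variance adds the spread of those means to the mean of the per-split variances.

import torch
split_means = torch.tensor([[0.0, 1.0], [2.0, 3.0]])  # (num_splits, C)
split_vars = torch.tensor([[1.0, 1.0], [1.0, 1.0]])
mean = split_means.mean(0)
var = split_vars.mean(0) + split_means.var(0, unbiased=False)
print(mean, var)  # tensor([1., 2.]) tensor([2., 2.])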
def forward(self, data: Union[dict, Tuple[dict]], training: bool = False) -> Union[dict, Tuple[dict]]: """Perform normalization, padding, bgr2rgb conversion and batch augmentation based on ``BaseDataPreprocessor``. Args: data (dict or Tuple[dict]): data sampled from dataloader. training (bool): Whether to enable training time augmentation. Returns: dict or Tuple[dict]: Data in the same format as the model input. """ data = self.cast_data(data) if isinstance(data, dict): return self.forward_onesample(data, training=training) elif isinstance(data, (tuple, list)): outputs = [] for data_sample in data: output = self.forward_onesample(data_sample, training=training) outputs.append(output) return tuple(outputs) else: raise TypeError(f'Unsupported data type: {type(data)}!')
Perform normalization, padding, bgr2rgb conversion and batch augmentation based on ``BaseDataPreprocessor``. Args: data (dict or Tuple[dict]): data sampled from dataloader. training (bool): Whether to enable training time augmentation. Returns: dict or Tuple[dict]: Data in the same format as the model input.
forward
python
open-mmlab/mmaction2
mmaction/models/data_preprocessors/data_preprocessor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/data_preprocessors/data_preprocessor.py
Apache-2.0
def forward_onesample(self, data, training: bool = False) -> dict: """Perform normalization, padding, bgr2rgb conversion and batch augmentation on one data sample. Args: data (dict): data sampled from dataloader. training (bool): Whether to enable training time augmentation. Returns: dict: Data in the same format as the model input. """ inputs, data_samples = data['inputs'], data['data_samples'] inputs, data_samples = self.preprocess(inputs, data_samples, training) data['inputs'] = inputs data['data_samples'] = data_samples return data
Perform normalization, padding, bgr2rgb conversion and batch augmentation on one data sample. Args: data (dict): data sampled from dataloader. training (bool): Whether to enable training time augmentation. Returns: dict: Data in the same format as the model input.
forward_onesample
python
open-mmlab/mmaction2
mmaction/models/data_preprocessors/data_preprocessor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/data_preprocessors/data_preprocessor.py
Apache-2.0
def forward(self, data: Dict, training: bool = False) -> Dict: """Preprocesses the data into the model input format. Args: data (dict): Data returned by dataloader. training (bool): Whether to enable training time augmentation. Returns: dict: Data in the same format as the model input. """ data = self.cast_data(data) inputs, data_samples = data['inputs'], data['data_samples'] for modality, modality_data in inputs.items(): preprocessor = self.preprocessors[modality] modality_data, data_samples = preprocessor.preprocess( modality_data, data_samples, training) inputs[modality] = modality_data data['inputs'] = inputs data['data_samples'] = data_samples return data
Preprocesses the data into the model input format. Args: data (dict): Data returned by dataloader. training (bool): Whether to enable training time augmentation. Returns: dict: Data in the same format as the model input.
forward
python
open-mmlab/mmaction2
mmaction/models/data_preprocessors/multimodal_data_preprocessor.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/data_preprocessors/multimodal_data_preprocessor.py
Apache-2.0