Dataset columns:
- code: string, length 66 to 870k
- docstring: string, length 19 to 26.7k
- func_name: string, length 1 to 138
- language: string, 1 class
- repo: string, length 7 to 68
- path: string, length 5 to 324
- url: string, length 46 to 389
- license: string, 7 classes
def loss(self, feats: Union[torch.Tensor, Tuple[torch.Tensor]], data_samples: SampleList, **kwargs) -> Dict: """Perform forward propagation of head and loss calculation on the features of the upstream network. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components. """ cls_scores = self(feats, **kwargs) return self.loss_by_feat(cls_scores, data_samples)
Perform forward propagation of head and loss calculation on the features of the upstream network. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
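The base head's loss() above only runs the head forward and hands the scores to loss_by_feat. A minimal, self-contained sketch of that delegation pattern follows; ToyHead, its layer sizes, and the plain integer labels are made up for illustration and are not the mmaction2 BaseHead API.

import torch
import torch.nn as nn

class ToyHead(nn.Module):
    """Toy stand-in showing the forward-then-loss delegation pattern."""

    def __init__(self, in_channels=8, num_classes=5):
        super().__init__()
        self.fc = nn.Linear(in_channels, num_classes)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, feats):
        return self.fc(feats)

    def loss(self, feats, labels):
        # run the head, then compute losses from the scores
        cls_scores = self(feats)
        return self.loss_by_feat(cls_scores, labels)

    def loss_by_feat(self, cls_scores, labels):
        return dict(loss_cls=self.loss_fn(cls_scores, labels))

head = ToyHead()
feats = torch.randn(4, 8)
labels = torch.randint(0, 5, (4,))
print(head.loss(feats, labels))  # {'loss_cls': tensor(...)}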
def loss_by_feat(self, cls_scores: torch.Tensor, data_samples: SampleList) -> Dict: """Calculate the loss based on the features extracted by the head. Args: cls_scores (torch.Tensor): Classification prediction results of all class, has shape (batch_size, num_classes). data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components. """ labels = [x.gt_label for x in data_samples] labels = torch.stack(labels).to(cls_scores.device) labels = labels.squeeze() losses = dict() if labels.shape == torch.Size([]): labels = labels.unsqueeze(0) elif labels.dim() == 1 and labels.size()[0] == self.num_classes \ and cls_scores.size()[0] == 1: # Fix a bug when training with soft labels and batch size is 1. # When using soft labels, `labels` and `cls_score` share the same # shape. labels = labels.unsqueeze(0) if cls_scores.size() != labels.size(): top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(), labels.detach().cpu().numpy(), self.topk) for k, a in zip(self.topk, top_k_acc): losses[f'top{k}_acc'] = torch.tensor( a, device=cls_scores.device) if self.label_smooth_eps != 0: if cls_scores.size() != labels.size(): labels = F.one_hot(labels, num_classes=self.num_classes) labels = ((1 - self.label_smooth_eps) * labels + self.label_smooth_eps / self.num_classes) loss_cls = self.loss_cls(cls_scores, labels) # loss_cls may be dictionary or single tensor if isinstance(loss_cls, dict): losses.update(loss_cls) else: losses['loss_cls'] = loss_cls return losses
Calculate the loss based on the features extracted by the head. Args: cls_scores (torch.Tensor): Classification prediction results of all class, has shape (batch_size, num_classes). data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components.
loss_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
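The label-smoothing branch in loss_by_feat mixes a one-hot target with a uniform distribution over classes. A small worked sketch of just that arithmetic, with example values for eps and num_classes:

import torch
import torch.nn.functional as F

eps, num_classes = 0.1, 4
labels = torch.tensor([0, 2, 3])
one_hot = F.one_hot(labels, num_classes=num_classes).float()
# mix the hard target with a uniform distribution; each row still sums to 1
smoothed = (1 - eps) * one_hot + eps / num_classes
print(smoothed)
# first row: [0.925, 0.025, 0.025, 0.025]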
def predict(self, feats: Union[torch.Tensor, Tuple[torch.Tensor]], data_samples: SampleList, **kwargs) -> SampleList: """Perform forward propagation of head and predict recognition results on the features of the upstream network. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: list[:obj:`ActionDataSample`]: Recognition results wrapped by :obj:`ActionDataSample`. """ cls_scores = self(feats, **kwargs) return self.predict_by_feat(cls_scores, data_samples)
Perform forward propagation of head and predict recognition results on the features of the upstream network. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: list[:obj:`ActionDataSample`]: Recognition results wrapped by :obj:`ActionDataSample`.
predict
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
def predict_by_feat(self, cls_scores: torch.Tensor, data_samples: SampleList) -> SampleList: """Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (torch.Tensor): Classification scores, has a shape (B*num_segs, num_classes) data_samples (list[:obj:`ActionDataSample`]): The annotation data of every samples. It usually includes information such as `gt_label`. Returns: List[:obj:`ActionDataSample`]: Recognition results wrapped by :obj:`ActionDataSample`. """ num_segs = cls_scores.shape[0] // len(data_samples) cls_scores = self.average_clip(cls_scores, num_segs=num_segs) pred_labels = cls_scores.argmax(dim=-1, keepdim=True).detach() for data_sample, score, pred_label in zip(data_samples, cls_scores, pred_labels): data_sample.set_pred_score(score) data_sample.set_pred_label(pred_label) return data_samples
Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (torch.Tensor): Classification scores, has a shape (B*num_segs, num_classes) data_samples (list[:obj:`ActionDataSample`]): The annotation data of every samples. It usually includes information such as `gt_label`. Returns: List[:obj:`ActionDataSample`]: Recognition results wrapped by :obj:`ActionDataSample`.
predict_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
def average_clip(self, cls_scores: torch.Tensor, num_segs: int = 1) -> torch.Tensor: """Averaging class scores over multiple clips. Using different averaging types ('score' or 'prob' or None, which is defined in test_cfg) to compute the final averaged class score. Only called in test mode. Args: cls_scores (torch.Tensor): Class scores to be averaged. num_segs (int): Number of clips for each input sample. Returns: torch.Tensor: Averaged class scores. """ if self.average_clips not in ['score', 'prob', None]: raise ValueError(f'{self.average_clips} is not supported. ' f'Currently supported ones are ' f'["score", "prob", None]') batch_size = cls_scores.shape[0] cls_scores = cls_scores.view((batch_size // num_segs, num_segs) + cls_scores.shape[1:]) if self.average_clips is None: return cls_scores elif self.average_clips == 'prob': cls_scores = F.softmax(cls_scores, dim=2).mean(dim=1) elif self.average_clips == 'score': cls_scores = cls_scores.mean(dim=1) return cls_scores
Averaging class scores over multiple clips. Using different averaging types ('score' or 'prob' or None, which is defined in test_cfg) to compute the final averaged class score. Only called in test mode. Args: cls_scores (torch.Tensor): Class scores to be averaged. num_segs (int): Number of clips for each input sample. Returns: torch.Tensor: Averaged class scores.
average_clip
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
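average_clip regroups scores of shape (batch * num_segs, num_classes) per sample and averages over clips, optionally after a softmax. A rough standalone sketch of the two averaging modes; shapes are illustrative and plain tensor ops stand in for the head's attributes.

import torch
import torch.nn.functional as F

batch_size, num_segs, num_classes = 2, 3, 4
cls_scores = torch.randn(batch_size * num_segs, num_classes)

# group the clips belonging to each sample
grouped = cls_scores.view(batch_size, num_segs, num_classes)
score_avg = grouped.mean(dim=1)                    # 'score' mode: average raw scores
prob_avg = F.softmax(grouped, dim=2).mean(dim=1)   # 'prob' mode: average probabilities
print(score_avg.shape, prob_avg.shape)  # torch.Size([2, 4]) torch.Size([2, 4])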
def forward(self, x: Tensor, num_segs: Optional[int] = None, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): For 2D backbone. Number of segments into which a video is divided. Defaults to None. Returns: Tensor: The output features after pooling. """ if isinstance(x, Tensor): n_dims = x.ndim elif isinstance(x, tuple): n_dims = x[0].ndim assert self.backbone_name == 'slowfast', \ 'Only support SlowFast backbone to input tuple' else: raise NotImplementedError(f'Unsupported feature type: {type(x)}') # For 2D backbone with spatial dimension if n_dims == 4: assert num_segs is not None if self.backbone_name == 'tsm': assert self.num_segments is not None, \ 'Please Specify num_segments for TSM' num_segs = self.num_segments # [N, T, channels, H, W] x = x.view((-1, num_segs) + x.shape[1:]) feat = self.pool1d(self.pool2d(x, dim=[-2, -1]), dim=1) elif n_dims == 5: if self.backbone_name == 'slowfast': x_slow, x_fast = x assert self.temporal_type is not None, \ 'slowfast backbone has to pool temporal dimension' x_fast = self.pool1d(self.pool2d(x_fast, dim=[-2, -1]), dim=2) x_slow = self.pool1d(self.pool2d(x_slow, dim=[-2, -1]), dim=2) feat = torch.cat((x_slow, x_fast), dim=1) # For GCN-based backbone elif self.backbone_name == 'gcn': # N, M, C, T, V feat = self.pool1d(self.pool2d(x, dim=[-2, -1]), dim=1) # For 3D backbone with spatial dimension else: # [N, channels, T, H, W] feat = self.pool1d(self.pool2d(x, dim=[-2, -1]), dim=2) # For backbone output feature without spatial and temporal dimension elif n_dims == 2: # [N, channels] feat = x return feat
Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): For 2D backbone. Number of segments into which a video is divided. Defaults to None. Returns: Tensor: The output features after pooling.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/feature_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/feature_head.py
Apache-2.0
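For a 2D backbone, the feature head above pools spatially and then averages over segments. A rough sketch of that branch only, using plain mean pooling in place of the head's configurable pool1d/pool2d helpers; the shapes are made up.

import torch

N, num_segs, C, H, W = 2, 8, 16, 7, 7
x = torch.randn(N * num_segs, C, H, W)

x = x.mean(dim=[-2, -1])        # spatial pooling -> [N * num_segs, C]
x = x.view(N, num_segs, C)      # regroup segments per video
feat = x.mean(dim=1)            # temporal pooling -> [N, C]
print(feat.shape)  # torch.Size([2, 16])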
def predict_by_feat(self, feats: Union[Tensor, Tuple[Tensor]], data_samples) -> Tensor: """Integrate multi-view features into one tensor. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: Tensor: The integrated multi-view features. """ num_segs = feats.shape[0] // len(data_samples) feats = self.average_clip(feats, num_segs=num_segs) return feats
Integrate multi-view features into one tensor. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: Tensor: The integrated multi-view features.
predict_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/feature_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/feature_head.py
Apache-2.0
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor: """Forward features from the upstream network. Args: x (torch.Tensor): Features from the upstream network. Returns: torch.Tensor: Classification scores with shape (B, num_classes). """ N, M, C, T, V = x.shape x = x.view(N * M, C, T, V) x = self.pool(x) x = x.view(N, M, C) x = x.mean(dim=1) assert x.shape[1] == self.in_channels if self.dropout is not None: x = self.dropout(x) cls_scores = self.fc(x) return cls_scores
Forward features from the upstream network. Args: x (torch.Tensor): Features from the upstream network. Returns: torch.Tensor: Classification scores with shape (B, num_classes).
forward
python
open-mmlab/mmaction2
mmaction/models/heads/gcn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/gcn_head.py
Apache-2.0
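The GCN head collapses the (N, M, C, T, V) skeleton features by pooling over time and joints and averaging over the M persons before the classifier. A hedged sketch with illustrative shapes and an example num_classes:

import torch
import torch.nn as nn

N, M, C, T, V = 2, 2, 64, 25, 17
num_classes = 10
x = torch.randn(N, M, C, T, V)

pool = nn.AdaptiveAvgPool2d(1)
fc = nn.Linear(C, num_classes)

x = pool(x.view(N * M, C, T, V)).view(N, M, C)  # pool T and V away
x = x.mean(dim=1)                               # average over the M persons
print(fc(x).shape)  # torch.Size([2, 10])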
def forward(self, x: Tensor, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The classification scores for input samples. """ # [N, in_channels, 4, 7, 7] if self.avg_pool is not None: x = self.avg_pool(x) # [N, in_channels, 1, 1, 1] if self.dropout is not None: x = self.dropout(x) # [N, in_channels, 1, 1, 1] x = x.view(x.shape[0], -1) # [N, in_channels] cls_score = self.fc_cls(x) # [N, num_classes] return cls_score
Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/i3d_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/i3d_head.py
Apache-2.0
def pre_logits(self, feats: Tuple[List[Tensor]]) -> Tensor: """The process before the final classification head. The input ``feats`` is a tuple of list of tensor, and each tensor is the feature of a backbone stage. """ if self.with_cls_token: _, cls_token = feats[-1] return cls_token else: patch_token = feats[-1] return patch_token.mean(dim=(2, 3, 4))
The process before the final classification head. The input ``feats`` is a tuple of list of tensor, and each tensor is the feature of a backbone stage.
pre_logits
python
open-mmlab/mmaction2
mmaction/models/heads/mvit_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/mvit_head.py
Apache-2.0
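pre_logits either returns the class token or averages the patch tokens over their temporal and spatial grid. A toy sketch of the two branches with made-up shapes; the real feats argument is a tuple of per-stage outputs, which is elided here.

import torch

N, C, T, H, W = 2, 768, 8, 7, 7
cls_token = torch.randn(N, C)
patch_token = torch.randn(N, C, T, H, W)

with_cls_token = False
# either take the class token directly or mean-pool the patch tokens
feat = cls_token if with_cls_token else patch_token.mean(dim=(2, 3, 4))
print(feat.shape)  # torch.Size([2, 768])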
def forward(self, x: Tuple[List[Tensor]], **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tuple[List[Tensor]]): The input data. Returns: Tensor: The classification scores for input samples. """ x = self.pre_logits(x) if self.dropout is not None: x = self.dropout(x) # [N, in_channels] cls_score = self.fc_cls(x) # [N, num_classes] return cls_score
Defines the computation performed at every call. Args: x (Tuple[List[Tensor]]): The input data. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/mvit_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/mvit_head.py
Apache-2.0
def loss_by_feat(self, cls_scores: Union[Tensor, Tuple[Tensor]], data_samples: SampleList) -> dict: """Calculate the loss based on the features extracted by the head. Args: cls_scores (Tensor): Classification prediction results of all class, has shape (batch_size, num_classes). data_samples (List[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components. """ labels = [x.gt_label for x in data_samples] labels = torch.stack(labels).to(cls_scores.device) labels = labels.squeeze() losses = dict() if labels.shape == torch.Size([]): labels = labels.unsqueeze(0) elif labels.dim() == 1 and cls_scores.size()[0] == 1: # Fix a bug when training with soft labels and batch size is 1. # When using soft labels, `labels` and `cls_score` share the same # shape. labels = labels.unsqueeze(0) if cls_scores.size() != labels.size(): top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(), labels.detach().cpu().numpy(), self.topk) for k, a in zip(self.topk, top_k_acc): losses[f'top{k}_acc'] = torch.tensor( a, device=cls_scores.device) if self.label_smooth_eps != 0: if cls_scores.size() != labels.size(): labels = F.one_hot(labels, num_classes=self.num_classes) labels = ((1 - self.label_smooth_eps) * labels + self.label_smooth_eps / self.num_classes) loss_cls = self.loss_cls(cls_scores, labels) # loss_cls may be dictionary or single tensor if isinstance(loss_cls, dict): losses.update(loss_cls) else: losses['loss_cls'] = loss_cls return losses
Calculate the loss based on the features extracted by the head. Args: cls_scores (Tensor): Classification prediction results of all class, has shape (batch_size, num_classes). data_samples (List[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components.
loss_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/omni_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/omni_head.py
Apache-2.0
def loss(self, feats: Tuple[torch.Tensor], data_samples: SampleList, **kwargs) -> Dict: """Perform forward propagation of head and loss calculation on the features of the upstream network. Args: feats (tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components. """ cls_scores = self(feats, **kwargs) return self.loss_by_feat(cls_scores, data_samples)
Perform forward propagation of head and loss calculation on the features of the upstream network. Args: feats (tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def loss_by_feat(self, cls_scores: Dict[str, torch.Tensor], data_samples: SampleList) -> Dict: """Calculate the loss based on the features extracted by the head. Args: cls_scores (dict[str, torch.Tensor]): The dict of classification scores, data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components. """ labels = torch.stack([x.gt_label for x in data_samples]) labels = labels.squeeze() if labels.shape == torch.Size([]): labels = labels.unsqueeze(0) elif labels.dim() == 1 and labels.size()[0] == self.num_classes \ and cls_scores.size()[0] == 1: # Fix a bug when training with soft labels and batch size is 1. # When using soft labels, `labels` and `cls_score` share the same # shape. labels = labels.unsqueeze(0) losses = dict() for loss_name, weight in zip(self.loss_components, self.loss_weights): cls_score = cls_scores[loss_name] loss_cls = self.loss_by_scores(cls_score, labels) loss_cls = {loss_name + '_' + k: v for k, v in loss_cls.items()} loss_cls[f'{loss_name}_loss_cls'] *= weight losses.update(loss_cls) return losses
Calculate the loss based on the features extracted by the head. Args: cls_scores (dict[str, torch.Tensor]): The dict of classification scores, data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dictionary of loss components.
loss_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def loss_by_scores(self, cls_scores: torch.Tensor, labels: torch.Tensor) -> Dict: """Calculate the loss based on the features extracted by the head. Args: cls_scores (torch.Tensor): Classification prediction results of all class, has shape (batch_size, num_classes). labels (torch.Tensor): The labels used to calculate the loss. Returns: dict: A dictionary of loss components. """ losses = dict() if cls_scores.size() != labels.size(): top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(), labels.detach().cpu().numpy(), self.topk) for k, a in zip(self.topk, top_k_acc): losses[f'top{k}_acc'] = torch.tensor( a, device=cls_scores.device) if self.label_smooth_eps != 0: if cls_scores.size() != labels.size(): labels = F.one_hot(labels, num_classes=self.num_classes) labels = ((1 - self.label_smooth_eps) * labels + self.label_smooth_eps / self.num_classes) loss_cls = self.loss_cls(cls_scores, labels) # loss_cls may be dictionary or single tensor if isinstance(loss_cls, dict): losses.update(loss_cls) else: losses['loss_cls'] = loss_cls return losses
Calculate the loss based on the features extracted by the head. Args: cls_scores (torch.Tensor): Classification prediction results of all class, has shape (batch_size, num_classes). labels (torch.Tensor): The labels used to calculate the loss. Returns: dict: A dictionary of loss components.
loss_by_scores
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
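loss_by_feat for the RGB+Pose head builds one loss dict per branch, prefixes the keys with the branch name, and scales each branch's loss_cls by its weight. A small sketch of that merging step with invented branch names and numbers:

import torch

loss_components = ['rgb', 'pose']
loss_weights = [1.0, 0.5]
branch_losses = {
    'rgb': dict(loss_cls=torch.tensor(1.2), top1_acc=torch.tensor(0.75)),
    'pose': dict(loss_cls=torch.tensor(0.8), top1_acc=torch.tensor(0.60)),
}

losses = dict()
for name, weight in zip(loss_components, loss_weights):
    # prefix the branch name and scale only the classification loss
    branch = {f'{name}_{k}': v for k, v in branch_losses[name].items()}
    branch[f'{name}_loss_cls'] = branch[f'{name}_loss_cls'] * weight
    losses.update(branch)
print(losses)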
def predict(self, feats: Tuple[torch.Tensor], data_samples: SampleList, **kwargs) -> SampleList: """Perform forward propagation of head and predict recognition results on the features of the upstream network. Args: feats (tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: list[:obj:`ActionDataSample`]: Recognition results wrapped by :obj:`ActionDataSample`. """ cls_scores = self(feats, **kwargs) return self.predict_by_feat(cls_scores, data_samples)
Perform forward propagation of head and predict recognition results on the features of the upstream network. Args: feats (tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: list[:obj:`ActionDataSample`]: Recognition results wrapped by :obj:`ActionDataSample`.
predict
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def predict_by_feat(self, cls_scores: Dict[str, torch.Tensor], data_samples: SampleList) -> SampleList: """Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (dict[str, torch.Tensor]): The dict of classification scores, data_samples (list[:obj:`ActionDataSample`]): The annotation data of every samples. It usually includes information such as `gt_label`. Returns: list[:obj:`ActionDataSample`]: Recognition results wrapped by :obj:`ActionDataSample`. """ pred_scores = [dict() for _ in range(len(data_samples))] for name in self.loss_components: cls_score = cls_scores[name] cls_score = self.predict_by_scores(cls_score, data_samples) for pred_score, score in zip(pred_scores, cls_score): pred_score[f'{name}'] = score for data_sample, pred_score, in zip(data_samples, pred_scores): data_sample.set_pred_score(pred_score) return data_samples
Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (dict[str, torch.Tensor]): The dict of classification scores, data_samples (list[:obj:`ActionDataSample`]): The annotation data of every samples. It usually includes information such as `gt_label`. Returns: list[:obj:`ActionDataSample`]: Recognition results wrapped by :obj:`ActionDataSample`.
predict_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def predict_by_scores(self, cls_scores: torch.Tensor, data_samples: SampleList) -> torch.Tensor: """Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (torch.Tensor): Classification scores, has a shape (B*num_segs, num_classes) data_samples (list[:obj:`ActionDataSample`]): The annotation data of every samples. Returns: torch.Tensor: The averaged classification scores. """ num_segs = cls_scores.shape[0] // len(data_samples) cls_scores = self.average_clip(cls_scores, num_segs=num_segs) return cls_scores
Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (torch.Tensor): Classification scores, has a shape (B*num_segs, num_classes) data_samples (list[:obj:`ActionDataSample`]): The annotation data of every samples. Returns: torch.Tensor: The averaged classification scores.
predict_by_scores
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def forward(self, x: Tuple[Tensor], **kwargs) -> None: """Defines the computation performed at every call. Args: x (tuple[torch.Tensor]): The input data. Returns: Tensor: The classification scores for input samples. """ # ([N, channel_slow, T1, H, W], [(N, channel_fast, T2, H, W)]) x_slow, x_fast = x # ([N, channel_slow, 1, 1, 1], [N, channel_fast, 1, 1, 1]) x_slow = self.avg_pool(x_slow) x_fast = self.avg_pool(x_fast) # [N, channel_fast + channel_slow, 1, 1, 1] x = torch.cat((x_fast, x_slow), dim=1) if self.dropout is not None: x = self.dropout(x) # [N x C] x = x.view(x.size(0), -1) # [N x num_classes] cls_score = self.fc_cls(x) return cls_score
Defines the computation performed at every call. Args: x (tuple[torch.Tensor]): The input data. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/slowfast_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/slowfast_head.py
Apache-2.0
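The SlowFast head pools both pathways globally and concatenates them along channels before the classifier. A shape-only sketch with illustrative channel counts:

import torch
import torch.nn as nn

N = 2
x_slow = torch.randn(N, 2048, 4, 7, 7)
x_fast = torch.randn(N, 256, 32, 7, 7)

pool = nn.AdaptiveAvgPool3d(1)
# pool each pathway to [N, C, 1, 1, 1], then concatenate along channels
x = torch.cat((pool(x_fast), pool(x_slow)), dim=1)
x = x.view(N, -1)
print(x.shape)  # torch.Size([2, 2304])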
def forward(self, x, num_segs: Optional[int] = None, fcn_test: bool = False, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int, optional): Number of segments into which a video is divided. Defaults to None. fcn_test (bool): Whether to apply full convolution (fcn) testing. Defaults to False. Returns: Tensor: The classification scores for input samples. """ if fcn_test: if self.avg_pool3d: x = self.avg_pool3d(x) if self.new_cls is None: self._init_new_cls() x = self.new_cls(x) cls_score_feat_map = x.view(x.size(0), -1) return cls_score_feat_map if self.avg_pool2d is None: kernel_size = (1, x.shape[-2], x.shape[-1]) self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0) if num_segs is None: # [N, in_channels, 3, 7, 7] x = self.avg_pool3d(x) else: # [N * num_segs, in_channels, 7, 7] x = self.avg_pool2d(x) # [N * num_segs, in_channels, 1, 1] x = x.reshape((-1, num_segs) + x.shape[1:]) # [N, num_segs, in_channels, 1, 1] x = self.consensus(x) # [N, 1, in_channels, 1, 1] x = x.squeeze(1) # [N, in_channels, 1, 1] if self.dropout is not None: x = self.dropout(x) # [N, in_channels, 1, 1] x = x.view(x.size(0), -1) # [N, in_channels] cls_score = self.fc_cls(x) # [N, num_classes] return cls_score
Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int, optional): Number of segments into which a video is divided. Defaults to None. fcn_test (bool): Whether to apply full convolution (fcn) testing. Defaults to False. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/tpn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tpn_head.py
Apache-2.0
def forward(self, x): """Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The classification scores for input samples. """ # [N, num_segs * hidden_dim] x = x.view(x.size(0), -1) x = self.classifier(x) return x
Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/trn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/trn_head.py
Apache-2.0
def forward(self, x, num_segs, **kwargs): """Defines the computation performed at every call. Args: x (torch.Tensor): The input data. num_segs (int): Useless in TRNHead. By default, `num_segs` is equal to `clip_len * num_clips * num_crops`, which is automatically generated in Recognizer forward phase and useless in TRN models. The `self.num_segments` we need is a hyper parameter to build TRN models. Returns: torch.Tensor: The classification scores for input samples. """ # [N * num_segs, in_channels, 7, 7] if self.avg_pool is not None: x = self.avg_pool(x) # [N * num_segs, in_channels, 1, 1] x = torch.flatten(x, 1) # [N * num_segs, in_channels] if self.dropout is not None: x = self.dropout(x) # [N, num_segs, hidden_dim] cls_score = self.fc_cls(x) cls_score = cls_score.view((-1, self.num_segments) + cls_score.size()[1:]) # [N, num_classes] cls_score = self.consensus(cls_score) return cls_score
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. num_segs (int): Useless in TRNHead. By default, `num_segs` is equal to `clip_len * num_clips * num_crops`, which is automatically generated in Recognizer forward phase and useless in TRN models. The `self.num_segments` we need is a hyper parameter to build TRN models. Returns: torch.Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/trn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/trn_head.py
Apache-2.0
def forward(self, x: Tensor, num_segs: int, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): Useless in TSMHead. By default, `num_segs` is equal to `clip_len * num_clips * num_crops`, which is automatically generated in Recognizer forward phase and useless in TSM models. The `self.num_segments` we need is a hyper parameter to build TSM models. Returns: Tensor: The classification scores for input samples. """ # [N * num_segs, in_channels, 7, 7] if self.avg_pool is not None: x = self.avg_pool(x) # [N * num_segs, in_channels, 1, 1] x = torch.flatten(x, 1) # [N * num_segs, in_channels] if self.dropout is not None: x = self.dropout(x) # [N * num_segs, num_classes] cls_score = self.fc_cls(x) if self.is_shift and self.temporal_pool: # [2 * N, num_segs // 2, num_classes] cls_score = cls_score.view((-1, self.num_segments // 2) + cls_score.size()[1:]) else: # [N, num_segs, num_classes] cls_score = cls_score.view((-1, self.num_segments) + cls_score.size()[1:]) # [N, 1, num_classes] cls_score = self.consensus(cls_score) # [N, num_classes] return cls_score.squeeze(1)
Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): Useless in TSMHead. By default, `num_segs` is equal to `clip_len * num_clips * num_crops`, which is automatically generated in Recognizer forward phase and useless in TSM models. The `self.num_segments` we need is a hyper parameter to build TSM models. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/tsm_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tsm_head.py
Apache-2.0
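The consensus step in the TSM head regroups per-frame scores by segment and averages them, which is what an average-consensus module amounts to. A minimal sketch with example sizes:

import torch

N, num_segments, num_classes = 2, 8, 10
frame_scores = torch.randn(N * num_segments, num_classes)

# regroup by video and average over the segment axis
cls_score = frame_scores.view(N, num_segments, num_classes).mean(
    dim=1, keepdim=True)        # [N, 1, num_classes]
print(cls_score.squeeze(1).shape)  # torch.Size([2, 10])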
def forward(self, x: Tensor, num_segs: int, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): Number of segments into which a video is divided. Returns: Tensor: The classification scores for input samples. """ # [N * num_segs, in_channels, 7, 7] if self.avg_pool is not None: if isinstance(x, tuple): shapes = [y.shape for y in x] assert 1 == 0, f'x is tuple {shapes}' x = self.avg_pool(x) # [N * num_segs, in_channels, 1, 1] x = x.reshape((-1, num_segs) + x.shape[1:]) # [N, num_segs, in_channels, 1, 1] x = self.consensus(x) # [N, 1, in_channels, 1, 1] x = x.squeeze(1) # [N, in_channels, 1, 1] if self.dropout is not None: x = self.dropout(x) # [N, in_channels, 1, 1] x = x.view(x.size(0), -1) # [N, in_channels] cls_score = self.fc_cls(x) # [N, num_classes] return cls_score
Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): Number of segments into which a video is divided. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/tsn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tsn_head.py
Apache-2.0
def forward(self, inputs, data_samples, mode, **kwargs): """The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method doesn't handle back propagation or optimizer updating; these are done in :meth:`train_step`. Args: inputs (Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[:obj:`ActionDataSample`], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor. """ inputs = torch.stack(inputs) if mode == 'tensor': return self._forward(inputs, **kwargs) if mode == 'predict': return self.predict(inputs, data_samples, **kwargs) elif mode == 'loss': return self.loss(inputs, data_samples, **kwargs) else: raise RuntimeError(f'Invalid mode "{mode}". ' 'Only supports loss, predict and tensor mode')
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method doesn't handle back propagation or optimizer updating; these are done in :meth:`train_step`. Args: inputs (Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[:obj:`ActionDataSample`], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor.
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def loss(self, batch_inputs, batch_data_samples, **kwargs): """Calculate losses from a batch of inputs and data samples. Args: batch_inputs (Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. batch_data_samples (List[:obj:`ActionDataSample`]): The batch data samples. It usually includes information such as ``gt_labels``. Returns: dict: A dictionary of loss components. """ gt_bbox = [ sample.gt_instances['gt_bbox'] for sample in batch_data_samples ] label_confidence, label_start, label_end = self.generate_labels( gt_bbox) device = batch_inputs.device label_confidence = label_confidence.to(device) label_start = label_start.to(device) label_end = label_end.to(device) confidence_map, start, end = self._forward(batch_inputs) loss = self.loss_cls(confidence_map, start, end, label_confidence, label_start, label_end, self.bm_mask) loss_dict = dict(loss=loss[0]) return loss_dict
Calculate losses from a batch of inputs and data samples. Args: batch_inputs (Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. batch_data_samples (List[:obj:`ActionDataSample`]): The batch data samples. It usually includes information such as ``gt_labels``. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def predict(self, batch_inputs, batch_data_samples, **kwargs): """Define the computation performed at every call when testing.""" confidence_map, start, end = self._forward(batch_inputs) start_scores = start[0].cpu().numpy() end_scores = end[0].cpu().numpy() cls_confidence = (confidence_map[0][1]).cpu().numpy() reg_confidence = (confidence_map[0][0]).cpu().numpy() max_start = max(start_scores) max_end = max(end_scores) # generate the set of start points and end points start_bins = np.zeros(len(start_scores)) start_bins[0] = 1 # [1,0,0...,0,0] end_bins = np.zeros(len(end_scores)) end_bins[-1] = 1 # [0,0,0...,0,1] for idx in range(1, self.tscale - 1): if start_scores[idx] > start_scores[ idx + 1] and start_scores[idx] > start_scores[idx - 1]: start_bins[idx] = 1 elif start_scores[idx] > (0.5 * max_start): start_bins[idx] = 1 if end_scores[idx] > end_scores[ idx + 1] and end_scores[idx] > end_scores[idx - 1]: end_bins[idx] = 1 elif end_scores[idx] > (0.5 * max_end): end_bins[idx] = 1 # iterate through all combinations of start_index and end_index new_proposals = [] for idx in range(self.tscale): for jdx in range(self.tscale): start_index = jdx end_index = start_index + idx + 1 if end_index < self.tscale and start_bins[ start_index] == 1 and end_bins[end_index] == 1: tmin = start_index / self.tscale tmax = end_index / self.tscale tmin_score = start_scores[start_index] tmax_score = end_scores[end_index] cls_score = cls_confidence[idx, jdx] reg_score = reg_confidence[idx, jdx] score = tmin_score * tmax_score * cls_score * reg_score new_proposals.append([ tmin, tmax, tmin_score, tmax_score, cls_score, reg_score, score ]) new_proposals = np.stack(new_proposals) video_info = batch_data_samples[0].metainfo proposal_list = post_processing(new_proposals, video_info, self.soft_nms_alpha, self.soft_nms_low_threshold, self.soft_nms_high_threshold, self.post_process_top_k, self.feature_extraction_interval) output = [ dict( video_name=video_info['video_name'], proposal_list=proposal_list) ] return output
Define the computation performed at every call when testing.
predict
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
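The predict step above keeps a temporal position as a candidate boundary when it is a local peak of the start/end probability curve or exceeds half of the global maximum. A standalone sketch of that rule on made-up start scores:

import numpy as np

start_scores = np.array([0.1, 0.4, 0.9, 0.3, 0.2, 0.6, 0.5])
start_bins = np.zeros(len(start_scores))
start_bins[0] = 1  # the first position is always kept
max_start = start_scores.max()
for idx in range(1, len(start_scores) - 1):
    is_peak = (start_scores[idx] > start_scores[idx + 1]
               and start_scores[idx] > start_scores[idx - 1])
    if is_peak or start_scores[idx] > 0.5 * max_start:
        start_bins[idx] = 1
print(start_bins)  # [1. 0. 1. 0. 0. 1. 0.]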
def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples, num_samples_per_bin): """Generate sample mask for a boundary-matching pair.""" plen = float(seg_tmax - seg_tmin) plen_sample = plen / (num_samples * num_samples_per_bin - 1.0) total_samples = [ seg_tmin + plen_sample * i for i in range(num_samples * num_samples_per_bin) ] p_mask = [] for idx in range(num_samples): bin_samples = total_samples[idx * num_samples_per_bin:(idx + 1) * num_samples_per_bin] bin_vector = np.zeros(tscale) for sample in bin_samples: sample_upper = math.ceil(sample) sample_decimal, sample_down = math.modf(sample) if 0 <= int(sample_down) <= (tscale - 1): bin_vector[int(sample_down)] += 1 - sample_decimal if 0 <= int(sample_upper) <= (tscale - 1): bin_vector[int(sample_upper)] += sample_decimal bin_vector = 1.0 / num_samples_per_bin * bin_vector p_mask.append(bin_vector) p_mask = np.stack(p_mask, axis=1) return p_mask
Generate sample mask for a boundary-matching pair.
_get_interp1d_bin_mask
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
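Each fractional sample position in the bin mask spreads a unit weight between its two neighbouring integer positions in proportion to the fractional part, which is the interpolation step inside the loop above. A tiny worked sketch of just that step; tscale and the sample position are example values.

import math
import numpy as np

tscale = 6
sample = 2.3
vec = np.zeros(tscale)
frac, lower = math.modf(sample)   # frac ~ 0.3, lower = 2.0
vec[int(lower)] += 1 - frac       # weight on index 2 -> ~0.7
vec[math.ceil(sample)] += frac    # weight on index 3 -> ~0.3
print(vec)  # [0.  0.  0.7 0.3 0.  0. ]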
def _get_interp1d_mask(self): """Generate sample mask for each point in Boundary-Matching Map.""" mask_mat = [] for start_index in range(self.tscale): mask_mat_vector = [] for duration_index in range(self.tscale): if start_index + duration_index < self.tscale: p_tmin = start_index p_tmax = start_index + duration_index center_len = float(p_tmax - p_tmin) + 1 sample_tmin = p_tmin - (center_len * self.boundary_ratio) sample_tmax = p_tmax + (center_len * self.boundary_ratio) p_mask = self._get_interp1d_bin_mask( sample_tmin, sample_tmax, self.tscale, self.num_samples, self.num_samples_per_bin) else: p_mask = np.zeros([self.tscale, self.num_samples]) mask_mat_vector.append(p_mask) mask_mat_vector = np.stack(mask_mat_vector, axis=2) mask_mat.append(mask_mat_vector) mask_mat = np.stack(mask_mat, axis=3) mask_mat = mask_mat.astype(np.float32) self.sample_mask = nn.Parameter( torch.tensor(mask_mat).view(self.tscale, -1), requires_grad=False)
Generate sample mask for each point in Boundary-Matching Map.
_get_interp1d_mask
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.): """Generate temporal anchors. Args: tmin_offset (int): Offset for the minimum value of temporal anchor. Default: 0. tmax_offset (int): Offset for the maximum value of temporal anchor. Default: 1. Returns: tuple[Sequence[float]]: The minimum and maximum values of temporal anchors. """ temporal_gap = 1. / self.tscale anchors_tmins = [] anchors_tmaxs = [] for i in range(self.tscale): anchors_tmins.append(temporal_gap * (i + tmin_offset)) anchors_tmaxs.append(temporal_gap * (i + tmax_offset)) return anchors_tmins, anchors_tmaxs
Generate temporal anchors. Args: tmin_offset (int): Offset for the minimum value of temporal anchor. Default: 0. tmax_offset (int): Offset for the maximum value of temporal anchor. Default: 1. Returns: tuple[Sequence[float]]: The minimum and maximum values of temporal anchors.
_temporal_anchors
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _forward(self, x): """Define the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module. """ # x.shape [batch_size, self.feat_dim, self.tscale] base_feature = self.x_1d_b(x) # base_feature.shape [batch_size, self.hidden_dim_1d, self.tscale] start = self.x_1d_s(base_feature).squeeze(1) # start.shape [batch_size, self.tscale] end = self.x_1d_e(base_feature).squeeze(1) # end.shape [batch_size, self.tscale] confidence_map = self.x_1d_p(base_feature) # [batch_size, self.hidden_dim_1d, self.tscale] confidence_map = self._boundary_matching_layer(confidence_map) # [batch_size, self.hidden_dim_1d, self.num_samples, self.tscale, self.tscale] # noqa confidence_map = self.x_3d_p(confidence_map).squeeze(2) # [batch_size, self.hidden_dim_3d, self.tscale, self.tscale] confidence_map = self.x_2d_p(confidence_map) # [batch_size, 2, self.tscale, self.tscale] return confidence_map, start, end
Define the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
_forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def forward(self, inputs, data_samples, mode, **kwargs): """The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method doesn't handle back propagation or optimizer updating; these are done in :meth:`train_step`. Args: inputs (Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[:obj:`ActionDataSample`], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor. """ inputs = torch.stack(inputs) if mode == 'tensor': return self._forward(inputs, **kwargs) if mode == 'predict': return self.predict(inputs, data_samples, **kwargs) elif mode == 'loss': return self.loss(inputs, data_samples, **kwargs) else: raise RuntimeError(f'Invalid mode "{mode}". ' 'Only supports loss, predict and tensor mode')
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method doesn't handle back propagation or optimizer updating; these are done in :meth:`train_step`. Args: inputs (Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[:obj:`ActionDataSample`], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor.
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def forward(self, x: Tensor) -> Tensor: """Forward call for LGTE. Args: x (torch.Tensor): The input tensor with shape (B, C, L) """ x = x.permute(2, 0, 1) mask = self.mask.repeat(x.size(1), 1, 1, 1) L = x.shape[0] x = self.atten(x, attn_mask=mask.reshape(-1, L, L)) x = self.norm1(x) x = self.ffn(x) x = self.norm2(x) x = x.permute(1, 2, 0) return x
Forward call for LGTE. Args: x (torch.Tensor): The input tensor with shape (B, C, L)
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
def StartEndRegressor(sample_num: int, feat_dim: int) -> nn.Module: """Start and End Regressor in the Temporal Boundary Regressor. Args: sample_num (int): number of samples for the start & end. feat_dim (int): feature dimension. Returns: A pytorch module that works as the start and end regressor. The input of the module should have a shape of (B, feat_dim * 2, sample_num). """ hidden_dim = 128 regressor = nn.Sequential( nn.Conv1d( feat_dim * 2, hidden_dim * 2, kernel_size=3, padding=1, groups=8, stride=2), nn.ReLU(inplace=True), nn.Conv1d( hidden_dim * 2, hidden_dim * 2, kernel_size=3, padding=1, groups=8, stride=2), nn.ReLU(inplace=True), nn.Conv1d(hidden_dim * 2, 2, kernel_size=sample_num // 4, groups=2), nn.Flatten()) return regressor
Start and End Regressor in the Temporal Boundary Regressor. Args: sample_num (int): number of samples for the start & end. feat_dim (int): feature dimension. Returns: A pytorch module that works as the start and end regressor. The input of the module should have a shape of (B, feat_dim * 2, sample_num).
StartEndRegressor
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
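As a sanity check on the regressor above: the two stride-2 convolutions reduce the temporal length from sample_num to sample_num // 4, and the final convolution plus Flatten yield a (B, 2) start/end output. A shape-tracing sketch with example sizes, rebuilding the same layer stack only to print the output shape:

import torch
import torch.nn as nn

sample_num, feat_dim, hidden_dim = 32, 64, 128
regressor = nn.Sequential(
    nn.Conv1d(feat_dim * 2, hidden_dim * 2, 3, padding=1, groups=8, stride=2),
    nn.ReLU(inplace=True),
    nn.Conv1d(hidden_dim * 2, hidden_dim * 2, 3, padding=1, groups=8, stride=2),
    nn.ReLU(inplace=True),
    nn.Conv1d(hidden_dim * 2, 2, kernel_size=sample_num // 4, groups=2),
    nn.Flatten())
x = torch.randn(4, feat_dim * 2, sample_num)  # (B, feat_dim * 2, sample_num)
print(regressor(x).shape)  # torch.Size([4, 2])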
def CenterWidthRegressor(temporal_len: int, feat_dim: int) -> nn.Module: """Center Width in the Temporal Boundary Regressor. Args: temporal_len (int): temporal dimension of the inputs. feat_dim (int): feature dimension. Returns: A pytorch module that works as the start and end regressor. The input of the module should have a shape of (B, feat_dim, temporal_len). """ hidden_dim = 512 regressor = nn.Sequential( nn.Conv1d( feat_dim, hidden_dim, kernel_size=3, padding=1, groups=4, stride=2), nn.ReLU(inplace=True), nn.Conv1d( hidden_dim, hidden_dim, kernel_size=3, padding=1, groups=4, stride=2), nn.ReLU(inplace=True), nn.Conv1d( hidden_dim, hidden_dim, kernel_size=temporal_len // 4, groups=4), nn.ReLU(inplace=True), nn.Conv1d(hidden_dim, 3, kernel_size=1)) return regressor
Center Width in the Temporal Boundary Regressor. Args: temporal_len (int): temporal dimension of the inputs. feat_dim (int): feature dimension. Returns: A pytorch module that works as the start and end regressor. The input of the module should have a shape of (B, feat_dim, temporal_len).
CenterWidthRegressor
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
def generate_candidate_proposals(video_list, video_infos, tem_results_dir, temporal_scale, peak_threshold, tem_results_ext='.csv', result_dict=None): """Generate Candidate Proposals with given temporal evaluation results. Each proposal file will contain: 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'. Args: video_list (list[int]): List of video indexes to generate proposals. video_infos (list[dict]): List of video_info dict that contains 'video_name', 'duration_frame', 'duration_second', 'feature_frame', and 'annotations'. tem_results_dir (str): Directory to load temporal evaluation results. temporal_scale (int): The number (scale) on temporal axis. peak_threshold (float): The threshold for proposal generation. tem_results_ext (str): File extension for temporal evaluation model output. Default: '.csv'. result_dict (dict | None): The dict to save the results. Default: None. Returns: dict: A dict contains video_name as keys and proposal list as value. If result_dict is not None, save the results to it. """ if tem_results_ext != '.csv': raise NotImplementedError('Only support csv format now.') tscale = temporal_scale tgap = 1. / tscale proposal_dict = {} for video_index in video_list: video_name = video_infos[video_index]['video_name'] tem_path = osp.join(tem_results_dir, video_name + tem_results_ext) tem_results = np.loadtxt( tem_path, dtype=np.float32, delimiter=',', skiprows=1) start_scores = tem_results[:, 1] end_scores = tem_results[:, 2] max_start = max(start_scores) max_end = max(end_scores) start_bins = np.zeros(len(start_scores)) start_bins[[0, -1]] = 1 end_bins = np.zeros(len(end_scores)) end_bins[[0, -1]] = 1 for idx in range(1, tscale - 1): if start_scores[idx] > start_scores[ idx + 1] and start_scores[idx] > start_scores[idx - 1]: start_bins[idx] = 1 elif start_scores[idx] > (peak_threshold * max_start): start_bins[idx] = 1 if end_scores[idx] > end_scores[ idx + 1] and end_scores[idx] > end_scores[idx - 1]: end_bins[idx] = 1 elif end_scores[idx] > (peak_threshold * max_end): end_bins[idx] = 1 tmin_list = [] tmin_score_list = [] tmax_list = [] tmax_score_list = [] for idx in range(tscale): if start_bins[idx] == 1: tmin_list.append(tgap / 2 + tgap * idx) tmin_score_list.append(start_scores[idx]) if end_bins[idx] == 1: tmax_list.append(tgap / 2 + tgap * idx) tmax_score_list.append(end_scores[idx]) new_props = [] for tmax, tmax_score in zip(tmax_list, tmax_score_list): for tmin, tmin_score in zip(tmin_list, tmin_score_list): if tmin >= tmax: break new_props.append([tmin, tmax, tmin_score, tmax_score]) new_props = np.stack(new_props) score = (new_props[:, 2] * new_props[:, 3]).reshape(-1, 1) new_props = np.concatenate((new_props, score), axis=1) new_props = new_props[new_props[:, -1].argsort()[::-1]] video_info = video_infos[video_index] video_frame = video_info['duration_frame'] video_second = video_info['duration_second'] feature_frame = video_info['feature_frame'] corrected_second = float(feature_frame) / video_frame * video_second gt_tmins = [] gt_tmaxs = [] for annotations in video_info['annotations']: gt_tmins.append(annotations['segment'][0] / corrected_second) gt_tmaxs.append(annotations['segment'][1] / corrected_second) new_iou_list = [] new_ioa_list = [] for new_prop in new_props: new_iou = max( temporal_iou(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs)) new_ioa = max( temporal_iop(new_prop[0], new_prop[1], gt_tmins, gt_tmaxs)) new_iou_list.append(new_iou) new_ioa_list.append(new_ioa) new_iou_list = np.array(new_iou_list).reshape(-1, 1) new_ioa_list = 
np.array(new_ioa_list).reshape(-1, 1) new_props = np.concatenate((new_props, new_iou_list), axis=1) new_props = np.concatenate((new_props, new_ioa_list), axis=1) proposal_dict[video_name] = new_props if result_dict is not None: result_dict[video_name] = new_props return proposal_dict
Generate Candidate Proposals with given temporal evaluation results. Each proposal file will contain: 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'. Args: video_list (list[int]): List of video indexes to generate proposals. video_infos (list[dict]): List of video_info dict that contains 'video_name', 'duration_frame', 'duration_second', 'feature_frame', and 'annotations'. tem_results_dir (str): Directory to load temporal evaluation results. temporal_scale (int): The number (scale) on temporal axis. peak_threshold (float): The threshold for proposal generation. tem_results_ext (str): File extension for temporal evaluation model output. Default: '.csv'. result_dict (dict | None): The dict to save the results. Default: None. Returns: dict: A dict contains video_name as keys and proposal list as value. If result_dict is not None, save the results to it.
generate_candidate_proposals
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/bsn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/bsn_utils.py
Apache-2.0
def generate_bsp_feature(video_list, video_infos, tem_results_dir, pgm_proposals_dir, top_k=1000, bsp_boundary_ratio=0.2, num_sample_start=8, num_sample_end=8, num_sample_action=16, num_sample_interp=3, tem_results_ext='.csv', pgm_proposal_ext='.csv', result_dict=None): """Generate Boundary-Sensitive Proposal Feature with given proposals. Args: video_list (list[int]): List of video indexes to generate bsp_feature. video_infos (list[dict]): List of video_info dict that contains 'video_name'. tem_results_dir (str): Directory to load temporal evaluation results. pgm_proposals_dir (str): Directory to load proposals. top_k (int): Number of proposals to be considered. Default: 1000 bsp_boundary_ratio (float): Ratio for proposal boundary (start/end). Default: 0.2. num_sample_start (int): Num of samples for actionness in start region. Default: 8. num_sample_end (int): Num of samples for actionness in end region. Default: 8. num_sample_action (int): Num of samples for actionness in center region. Default: 16. num_sample_interp (int): Num of samples for interpolation for each sample point. Default: 3. tem_results_ext (str): File extension for temporal evaluation model output. Default: '.csv'. pgm_proposal_ext (str): File extension for proposals. Default: '.csv'. result_dict (dict | None): The dict to save the results. Default: None. Returns: bsp_feature_dict (dict): A dict contains video_name as keys and bsp_feature as value. If result_dict is not None, save the results to it. """ if tem_results_ext != '.csv' or pgm_proposal_ext != '.csv': raise NotImplementedError('Only support csv format now.') bsp_feature_dict = {} for video_index in video_list: video_name = video_infos[video_index]['video_name'] # Load temporal evaluation results tem_path = osp.join(tem_results_dir, video_name + tem_results_ext) tem_results = np.loadtxt( tem_path, dtype=np.float32, delimiter=',', skiprows=1) score_action = tem_results[:, 0] seg_tmins = tem_results[:, 3] seg_tmaxs = tem_results[:, 4] video_scale = len(tem_results) video_gap = seg_tmaxs[0] - seg_tmins[0] video_extend = int(video_scale / 4 + 10) # Load proposals results proposal_path = osp.join(pgm_proposals_dir, video_name + pgm_proposal_ext) pgm_proposals = np.loadtxt( proposal_path, dtype=np.float32, delimiter=',', skiprows=1) pgm_proposals = pgm_proposals[:top_k] # Generate temporal sample points boundary_zeros = np.zeros([video_extend]) score_action = np.concatenate( (boundary_zeros, score_action, boundary_zeros)) begin_tp = [] middle_tp = [] end_tp = [] for i in range(video_extend): begin_tp.append(-video_gap / 2 - (video_extend - 1 - i) * video_gap) end_tp.append(video_gap / 2 + seg_tmaxs[-1] + i * video_gap) for i in range(video_scale): middle_tp.append(video_gap / 2 + i * video_gap) t_points = begin_tp + middle_tp + end_tp bsp_feature = [] for pgm_proposal in pgm_proposals: tmin = pgm_proposal[0] tmax = pgm_proposal[1] tlen = tmax - tmin # Temporal range for start tmin_0 = tmin - tlen * bsp_boundary_ratio tmin_1 = tmin + tlen * bsp_boundary_ratio # Temporal range for end tmax_0 = tmax - tlen * bsp_boundary_ratio tmax_1 = tmax + tlen * bsp_boundary_ratio # Generate features at start boundary tlen_start = (tmin_1 - tmin_0) / (num_sample_start - 1) tlen_start_sample = tlen_start / num_sample_interp t_new = [ tmin_0 - tlen_start / 2 + tlen_start_sample * i for i in range(num_sample_start * num_sample_interp + 1) ] y_new_start_action = np.interp(t_new, t_points, score_action) y_new_start = [ np.mean(y_new_start_action[i * num_sample_interp:(i + 1) * 
num_sample_interp + 1]) for i in range(num_sample_start) ] # Generate features at end boundary tlen_end = (tmax_1 - tmax_0) / (num_sample_end - 1) tlen_end_sample = tlen_end / num_sample_interp t_new = [ tmax_0 - tlen_end / 2 + tlen_end_sample * i for i in range(num_sample_end * num_sample_interp + 1) ] y_new_end_action = np.interp(t_new, t_points, score_action) y_new_end = [ np.mean(y_new_end_action[i * num_sample_interp:(i + 1) * num_sample_interp + 1]) for i in range(num_sample_end) ] # Generate features for action tlen_action = (tmax - tmin) / (num_sample_action - 1) tlen_action_sample = tlen_action / num_sample_interp t_new = [ tmin - tlen_action / 2 + tlen_action_sample * i for i in range(num_sample_action * num_sample_interp + 1) ] y_new_action = np.interp(t_new, t_points, score_action) y_new_action = [ np.mean(y_new_action[i * num_sample_interp:(i + 1) * num_sample_interp + 1]) for i in range(num_sample_action) ] feature = np.concatenate([y_new_action, y_new_start, y_new_end]) bsp_feature.append(feature) bsp_feature = np.array(bsp_feature) bsp_feature_dict[video_name] = bsp_feature if result_dict is not None: result_dict[video_name] = bsp_feature return bsp_feature_dict
Generate Boundary-Sensitive Proposal Feature with given proposals. Args: video_list (list[int]): List of video indexes to generate bsp_feature. video_infos (list[dict]): List of video_info dict that contains 'video_name'. tem_results_dir (str): Directory to load temporal evaluation results. pgm_proposals_dir (str): Directory to load proposals. top_k (int): Number of proposals to be considered. Default: 1000. bsp_boundary_ratio (float): Ratio for proposal boundary (start/end). Default: 0.2. num_sample_start (int): Num of samples for actionness in start region. Default: 8. num_sample_end (int): Num of samples for actionness in end region. Default: 8. num_sample_action (int): Num of samples for actionness in center region. Default: 16. num_sample_interp (int): Num of samples for interpolation for each sample point. Default: 3. tem_results_ext (str): File extension for temporal evaluation model output. Default: '.csv'. pgm_proposal_ext (str): File extension for proposals. Default: '.csv'. result_dict (dict | None): The dict to save the results. Default: None. Returns: bsp_feature_dict (dict): A dict with video_name as keys and bsp_feature as values. If result_dict is not None, the results are also saved to it.
generate_bsp_feature
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/bsn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/bsn_utils.py
Apache-2.0
def temporal_iou(proposal_min, proposal_max, gt_min, gt_max): """Compute IoU score between a groundtruth bbox and the proposals. Args: proposal_min (list[float]): List of temporal anchor min. proposal_max (list[float]): List of temporal anchor max. gt_min (float): Groundtruth temporal box min. gt_max (float): Groundtruth temporal box max. Returns: list[float]: List of iou scores. """ len_anchors = proposal_max - proposal_min int_tmin = np.maximum(proposal_min, gt_min) int_tmax = np.minimum(proposal_max, gt_max) inter_len = np.maximum(int_tmax - int_tmin, 0.) union_len = len_anchors - inter_len + gt_max - gt_min jaccard = np.divide(inter_len, union_len) return jaccard
Compute IoU score between a groundtruth bbox and the proposals. Args: proposal_min (list[float]): List of temporal anchor min. proposal_max (list[float]): List of temporal anchor max. gt_min (float): Groundtruth temporal box min. gt_max (float): Groundtruth temporal box max. Returns: list[float]: List of iou scores.
temporal_iou
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
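As a quick sanity check, the snippet below calls temporal_iou (as defined above) on a pair of made-up proposals so the result can be verified by hand; the numbers are purely illustrative.

import numpy as np

# Two hypothetical proposals [0, 2] and [1, 3] against a ground truth [1, 2.5].
proposal_min = np.array([0.0, 1.0])
proposal_max = np.array([2.0, 3.0])
gt_min, gt_max = 1.0, 2.5

iou = temporal_iou(proposal_min, proposal_max, gt_min, gt_max)
# Proposal [0, 2]: intersection 1.0, union 2.5 -> IoU 0.4
# Proposal [1, 3]: intersection 1.5, union 2.0 -> IoU 0.75
print(iou)  # [0.4  0.75]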
def temporal_iop(proposal_min, proposal_max, gt_min, gt_max): """Compute IoP score between a groundtruth bbox and the proposals. Compute the IoP which is defined as the overlap ratio with groundtruth proportional to the duration of this proposal. Args: proposal_min (list[float]): List of temporal anchor min. proposal_max (list[float]): List of temporal anchor max. gt_min (float): Groundtruth temporal box min. gt_max (float): Groundtruth temporal box max. Returns: list[float]: List of intersection over anchor scores. """ len_anchors = np.array(proposal_max - proposal_min) int_tmin = np.maximum(proposal_min, gt_min) int_tmax = np.minimum(proposal_max, gt_max) inter_len = np.maximum(int_tmax - int_tmin, 0.) scores = np.divide(inter_len, len_anchors) return scores
Compute IoP score between a groundtruth bbox and the proposals. Compute the IoP which is defined as the overlap ratio with groundtruth proportional to the duration of this proposal. Args: proposal_min (list[float]): List of temporal anchor min. proposal_max (list[float]): List of temporal anchor max. gt_min (float): Groundtruth temporal box min. gt_max (float): Groundtruth temporal box max. Returns: list[float]: List of intersection over anchor scores.
temporal_iop
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
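For contrast, the same made-up proposals run through temporal_iop (defined above): the intersection is divided by the proposal length rather than by the union.

import numpy as np

proposal_min = np.array([0.0, 1.0])
proposal_max = np.array([2.0, 3.0])
gt_min, gt_max = 1.0, 2.5

iop = temporal_iop(proposal_min, proposal_max, gt_min, gt_max)
# Proposal [0, 2]: 1.0 / 2.0 = 0.5;  Proposal [1, 3]: 1.5 / 2.0 = 0.75
print(iop)  # [0.5  0.75]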
def soft_nms(proposals, alpha, low_threshold, high_threshold, top_k): """Soft NMS for temporal proposals. Args: proposals (np.ndarray): Proposals generated by network. alpha (float): Alpha value of Gaussian decaying function. low_threshold (float): Low threshold for soft nms. high_threshold (float): High threshold for soft nms. top_k (int): Top k values to be considered. Returns: np.ndarray: The updated proposals. """ proposals = proposals[proposals[:, -1].argsort()[::-1]] tstart = list(proposals[:, 0]) tend = list(proposals[:, 1]) tscore = list(proposals[:, -1]) rstart = [] rend = [] rscore = [] while len(tscore) > 0 and len(rscore) <= top_k: max_index = np.argmax(tscore) max_width = tend[max_index] - tstart[max_index] iou_list = temporal_iou(tstart[max_index], tend[max_index], np.array(tstart), np.array(tend)) iou_exp_list = np.exp(-np.square(iou_list) / alpha) for idx, _ in enumerate(tscore): if idx != max_index: current_iou = iou_list[idx] if current_iou > low_threshold + (high_threshold - low_threshold) * max_width: tscore[idx] = tscore[idx] * iou_exp_list[idx] rstart.append(tstart[max_index]) rend.append(tend[max_index]) rscore.append(tscore[max_index]) tstart.pop(max_index) tend.pop(max_index) tscore.pop(max_index) rstart = np.array(rstart).reshape(-1, 1) rend = np.array(rend).reshape(-1, 1) rscore = np.array(rscore).reshape(-1, 1) new_proposals = np.concatenate((rstart, rend, rscore), axis=1) return new_proposals
Soft NMS for temporal proposals. Args: proposals (np.ndarray): Proposals generated by network. alpha (float): Alpha value of Gaussian decaying function. low_threshold (float): Low threshold for soft nms. high_threshold (float): High threshold for soft nms. top_k (int): Top k values to be considered. Returns: np.ndarray: The updated proposals.
soft_nms
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
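A minimal usage sketch of soft_nms as defined above; the proposal values and thresholds are made up. Each row's first two columns are (tstart, tend) and the last column is the score.

import numpy as np

proposals = np.array([
    [0.10, 0.40, 0.90],
    [0.12, 0.42, 0.85],   # heavily overlaps the first proposal
    [0.60, 0.90, 0.80],
])

kept = soft_nms(
    proposals,
    alpha=0.4,            # strength of the Gaussian decay
    low_threshold=0.5,
    high_threshold=0.9,
    top_k=100,
)
# Overlapping proposals are not discarded outright; their scores are decayed
# before they get a chance to be selected.
print(kept)               # rows of (tstart, tend, possibly decayed score)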
def post_processing(result, video_info, soft_nms_alpha, soft_nms_low_threshold, soft_nms_high_threshold, post_process_top_k, feature_extraction_interval): """Post process for temporal proposals generation. Args: result (np.ndarray): Proposals generated by network. video_info (dict): Meta data of video. Required keys are 'duration_frame', 'duration_second'. soft_nms_alpha (float): Alpha value of Gaussian decaying function. soft_nms_low_threshold (float): Low threshold for soft nms. soft_nms_high_threshold (float): High threshold for soft nms. post_process_top_k (int): Top k values to be considered. feature_extraction_interval (int): Interval used in feature extraction. Returns: list[dict]: The updated proposals, e.g. [{'score': 0.9, 'segment': [0, 1]}, {'score': 0.8, 'segment': [0, 2]}, ...]. """ if len(result) > 1: result = soft_nms(result, soft_nms_alpha, soft_nms_low_threshold, soft_nms_high_threshold, post_process_top_k) result = result[result[:, -1].argsort()[::-1]] video_duration = float( video_info['duration_frame'] // feature_extraction_interval * feature_extraction_interval ) / video_info['duration_frame'] * video_info['duration_second'] proposal_list = [] for j in range(min(post_process_top_k, len(result))): proposal = {} proposal['score'] = float(result[j, -1]) proposal['segment'] = [ max(0, result[j, 0]) * video_duration, min(1, result[j, 1]) * video_duration ] proposal_list.append(proposal) return proposal_list
Post process for temporal proposals generation. Args: result (np.ndarray): Proposals generated by network. video_info (dict): Meta data of video. Required keys are 'duration_frame', 'duration_second'. soft_nms_alpha (float): Alpha value of Gaussian decaying function. soft_nms_low_threshold (float): Low threshold for soft nms. soft_nms_high_threshold (float): High threshold for soft nms. post_process_top_k (int): Top k values to be considered. feature_extraction_interval (int): Interval used in feature extraction. Returns: list[dict]: The updated proposals, e.g. [{'score': 0.9, 'segment': [0, 1]}, {'score': 0.8, 'segment': [0, 2]}, ...].
post_processing
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
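The video_duration rescaling inside post_processing is easier to see with concrete numbers; the metadata below is invented for illustration.

# Hypothetical metadata: 1000 frames, a 40 s video, features extracted every 16 frames.
duration_frame, duration_second, interval = 1000, 40.0, 16

# Only whole feature windows are covered by the proposals, so normalized segment
# coordinates are rescaled to the covered duration rather than the full duration.
covered_frames = duration_frame // interval * interval         # 62 * 16 = 992
video_duration = covered_frames / duration_frame * duration_second
print(video_duration)                                           # 39.68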
def forward(self, *args, **kwargs): """Defines the computation performed at every call. Args: *args: The positional arguments for the corresponding loss. **kwargs: The keyword arguments for the corresponding loss. Returns: torch.Tensor: The calculated loss. """ ret = self._forward(*args, **kwargs) if isinstance(ret, dict): for k in ret: if 'loss' in k: ret[k] *= self.loss_weight else: ret *= self.loss_weight return ret
Defines the computation performed at every call. Args: *args: The positional arguments for the corresponding loss. **kwargs: The keyword arguments for the corresponding loss. Returns: torch.Tensor: The calculated loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/base.py
Apache-2.0
def forward(self, reg_score, label, threshold=0.5, ratio_range=(1.05, 21), eps=1e-5): """Calculate Binary Logistic Regression Loss. Args: reg_score (torch.Tensor): Predicted score by model. label (torch.Tensor): Groundtruth labels. threshold (float): Threshold for positive instances. Default: 0.5. ratio_range (tuple): Lower bound and upper bound for ratio. Default: (1.05, 21) eps (float): Epsilon for small value. Default: 1e-5. Returns: torch.Tensor: Returned binary logistic loss. """ return binary_logistic_regression_loss(reg_score, label, threshold, ratio_range, eps)
Calculate Binary Logistic Regression Loss. Args: reg_score (torch.Tensor): Predicted score by model. label (torch.Tensor): Groundtruth labels. threshold (float): Threshold for positive instances. Default: 0.5. ratio_range (tuple): Lower bound and upper bound for ratio. Default: (1.05, 21) eps (float): Epsilon for small value. Default: 1e-5. Returns: torch.Tensor: Returned binary logistic loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/binary_logistic_regression_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/binary_logistic_regression_loss.py
Apache-2.0
def tem_loss(pred_start, pred_end, gt_start, gt_end): """Calculate Temporal Evaluation Module Loss. This function calculate the binary_logistic_regression_loss for start and end respectively and returns the sum of their losses. Args: pred_start (torch.Tensor): Predicted start score by BMN model. pred_end (torch.Tensor): Predicted end score by BMN model. gt_start (torch.Tensor): Groundtruth confidence score for start. gt_end (torch.Tensor): Groundtruth confidence score for end. Returns: torch.Tensor: Returned binary logistic loss. """ loss_start = binary_logistic_regression_loss(pred_start, gt_start) loss_end = binary_logistic_regression_loss(pred_end, gt_end) loss = loss_start + loss_end return loss
Calculate Temporal Evaluation Module Loss. This function calculates the binary_logistic_regression_loss for start and end respectively and returns the sum of their losses. Args: pred_start (torch.Tensor): Predicted start score by BMN model. pred_end (torch.Tensor): Predicted end score by BMN model. gt_start (torch.Tensor): Groundtruth confidence score for start. gt_end (torch.Tensor): Groundtruth confidence score for end. Returns: torch.Tensor: Returned binary logistic loss.
tem_loss
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
def pem_reg_loss(pred_score, gt_iou_map, mask, high_temporal_iou_threshold=0.7, low_temporal_iou_threshold=0.3): """Calculate Proposal Evaluation Module Regression Loss. Args: pred_score (torch.Tensor): Predicted temporal_iou score by BMN. gt_iou_map (torch.Tensor): Groundtruth temporal_iou score. mask (torch.Tensor): Boundary-Matching mask. high_temporal_iou_threshold (float): Higher threshold of temporal_iou. Default: 0.7. low_temporal_iou_threshold (float): Higher threshold of temporal_iou. Default: 0.3. Returns: torch.Tensor: Proposal evaluation regression loss. """ u_hmask = (gt_iou_map > high_temporal_iou_threshold).float() u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) & (gt_iou_map > low_temporal_iou_threshold)).float() u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) & (gt_iou_map > 0.)).float() u_lmask = u_lmask * mask num_h = torch.sum(u_hmask) num_m = torch.sum(u_mmask) num_l = torch.sum(u_lmask) r_m = num_h / num_m u_smmask = torch.rand_like(gt_iou_map) u_smmask = u_mmask * u_smmask u_smmask = (u_smmask > (1. - r_m)).float() r_l = num_h / num_l u_slmask = torch.rand_like(gt_iou_map) u_slmask = u_lmask * u_slmask u_slmask = (u_slmask > (1. - r_l)).float() weights = u_hmask + u_smmask + u_slmask loss = F.mse_loss(pred_score * weights, gt_iou_map * weights) loss = 0.5 * torch.sum( loss * torch.ones_like(weights)) / torch.sum(weights) return loss
Calculate Proposal Evaluation Module Regression Loss. Args: pred_score (torch.Tensor): Predicted temporal_iou score by BMN. gt_iou_map (torch.Tensor): Groundtruth temporal_iou score. mask (torch.Tensor): Boundary-Matching mask. high_temporal_iou_threshold (float): Higher threshold of temporal_iou. Default: 0.7. low_temporal_iou_threshold (float): Lower threshold of temporal_iou. Default: 0.3. Returns: torch.Tensor: Proposal evaluation regression loss.
pem_reg_loss
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
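The balanced sampling in pem_reg_loss keeps roughly as many medium- and low-IoU entries as there are high-IoU entries. A small sketch with made-up counts shows the mechanism (a random draw keeps an entry whenever it exceeds 1 - r):

import torch

num_high, num_medium = 10, 50
u_mmask = torch.ones(num_medium)            # toy medium-IoU mask
r_m = num_high / num_medium                 # 0.2

u_smmask = (torch.rand(num_medium) * u_mmask > (1. - r_m)).float()
print(u_smmask.sum())                       # ~10 medium entries survive on average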
def pem_cls_loss(pred_score, gt_iou_map, mask, threshold=0.9, ratio_range=(1.05, 21), eps=1e-5): """Calculate Proposal Evaluation Module Classification Loss. Args: pred_score (torch.Tensor): Predicted temporal_iou score by BMN. gt_iou_map (torch.Tensor): Groundtruth temporal_iou score. mask (torch.Tensor): Boundary-Matching mask. threshold (float): Threshold of temporal_iou for positive instances. Default: 0.9. ratio_range (tuple): Lower bound and upper bound for ratio. Default: (1.05, 21) eps (float): Epsilon for small value. Default: 1e-5 Returns: torch.Tensor: Proposal evaluation classification loss. """ pmask = (gt_iou_map > threshold).float() nmask = (gt_iou_map <= threshold).float() nmask = nmask * mask num_positive = max(torch.sum(pmask), 1) num_entries = num_positive + torch.sum(nmask) ratio = num_entries / num_positive ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1]) coef_0 = 0.5 * ratio / (ratio - 1) coef_1 = 0.5 * ratio loss_pos = coef_1 * torch.log(pred_score + eps) * pmask loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries return loss
Calculate Proposal Evaluation Module Classification Loss. Args: pred_score (torch.Tensor): Predicted temporal_iou score by BMN. gt_iou_map (torch.Tensor): Groundtruth temporal_iou score. mask (torch.Tensor): Boundary-Matching mask. threshold (float): Threshold of temporal_iou for positive instances. Default: 0.9. ratio_range (tuple): Lower bound and upper bound for ratio. Default: (1.05, 21) eps (float): Epsilon for small value. Default: 1e-5 Returns: torch.Tensor: Proposal evaluation classification loss.
pem_cls_loss
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
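The class-rebalancing coefficients in pem_cls_loss are easiest to check by hand; the counts below are made up.

import torch

num_positive = torch.tensor(20.)
num_entries = torch.tensor(200.)            # 20 positives + 180 negatives

ratio = torch.clamp(num_entries / num_positive, 1.05, 21)   # 10.0
coef_0 = 0.5 * ratio / (ratio - 1)                          # ~0.556, applied to negatives
coef_1 = 0.5 * ratio                                        # 5.0,    applied to positives
# With these weights the two classes contribute roughly equally:
# 20 * 5.0 = 100 vs. 180 * 0.556 ~= 100.
print(ratio.item(), coef_0.item(), coef_1.item())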
def forward(self, pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, bm_mask, weight_tem=1.0, weight_pem_reg=10.0, weight_pem_cls=1.0): """Calculate Boundary Matching Network Loss. Args: pred_bm (torch.Tensor): Predicted confidence score for boundary matching map. pred_start (torch.Tensor): Predicted confidence score for start. pred_end (torch.Tensor): Predicted confidence score for end. gt_iou_map (torch.Tensor): Groundtruth score for boundary matching map. gt_start (torch.Tensor): Groundtruth temporal_iou score for start. gt_end (torch.Tensor): Groundtruth temporal_iou score for end. bm_mask (torch.Tensor): Boundary-Matching mask. weight_tem (float): Weight for tem loss. Default: 1.0. weight_pem_reg (float): Weight for pem regression loss. Default: 10.0. weight_pem_cls (float): Weight for pem classification loss. Default: 1.0. Returns: tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]): (loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the bmn loss, tem_loss is the temporal evaluation loss, pem_reg_loss is the proposal evaluation regression loss, pem_cls_loss is the proposal evaluation classification loss. """ pred_bm_reg = pred_bm[:, 0].contiguous() pred_bm_cls = pred_bm[:, 1].contiguous() gt_iou_map = gt_iou_map * bm_mask pem_reg_loss = self.pem_reg_loss(pred_bm_reg, gt_iou_map, bm_mask) pem_cls_loss = self.pem_cls_loss(pred_bm_cls, gt_iou_map, bm_mask) tem_loss = self.tem_loss(pred_start, pred_end, gt_start, gt_end) loss = ( weight_tem * tem_loss + weight_pem_reg * pem_reg_loss + weight_pem_cls * pem_cls_loss) return loss, tem_loss, pem_reg_loss, pem_cls_loss
Calculate Boundary Matching Network Loss. Args: pred_bm (torch.Tensor): Predicted confidence score for boundary matching map. pred_start (torch.Tensor): Predicted confidence score for start. pred_end (torch.Tensor): Predicted confidence score for end. gt_iou_map (torch.Tensor): Groundtruth score for boundary matching map. gt_start (torch.Tensor): Groundtruth temporal_iou score for start. gt_end (torch.Tensor): Groundtruth temporal_iou score for end. bm_mask (torch.Tensor): Boundary-Matching mask. weight_tem (float): Weight for tem loss. Default: 1.0. weight_pem_reg (float): Weight for pem regression loss. Default: 10.0. weight_pem_cls (float): Weight for pem classification loss. Default: 1.0. Returns: tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]): (loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the bmn loss, tem_loss is the temporal evaluation loss, pem_reg_loss is the proposal evaluation regression loss, pem_cls_loss is the proposal evaluation classification loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor, **kwargs) -> torch.Tensor: """Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate CrossEntropy loss. Returns: torch.Tensor: The returned CrossEntropy loss. """ if cls_score.size() == label.size(): # calculate loss for soft label assert cls_score.dim() == 2, 'Only support 2-dim soft label' assert len(kwargs) == 0, \ ('For now, no extra args are supported for soft label, ' f'but get {kwargs}') lsm = F.log_softmax(cls_score, 1) if self.class_weight is not None: self.class_weight = self.class_weight.to(cls_score.device) lsm = lsm * self.class_weight.unsqueeze(0) loss_cls = -(label * lsm).sum(1) # default reduction 'mean' if self.class_weight is not None: # Use weighted average as pytorch CrossEntropyLoss does. # For more information, please visit https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html # noqa loss_cls = loss_cls.sum() / torch.sum( self.class_weight.unsqueeze(0) * label) else: loss_cls = loss_cls.mean() else: # calculate loss for hard label if self.class_weight is not None: assert 'weight' not in kwargs, \ "The key 'weight' already exists." kwargs['weight'] = self.class_weight.to(cls_score.device) loss_cls = F.cross_entropy(cls_score, label, **kwargs) return loss_cls
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate CrossEntropy loss. Returns: torch.Tensor: The returned CrossEntropy loss.
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/cross_entropy_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py
Apache-2.0
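A minimal sketch of the two branches of _forward, without class weights: hard labels go through F.cross_entropy, while soft labels (same shape as the scores) use the explicit -(label * log_softmax).sum(1) form.

import torch
import torch.nn.functional as F

cls_score = torch.tensor([[2.0, 0.5, -1.0]])

# Hard label: a class index.
hard_label = torch.tensor([0])
loss_hard = F.cross_entropy(cls_score, hard_label)

# Soft label: a distribution over classes with the same shape as cls_score.
soft_label = torch.tensor([[0.8, 0.2, 0.0]])
loss_soft = -(soft_label * F.log_softmax(cls_score, dim=1)).sum(1).mean()

print(loss_hard.item(), loss_soft.item())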
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor, **kwargs) -> torch.Tensor: """Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate bce loss with logits. Returns: torch.Tensor: The returned bce loss with logits. """ if self.class_weight is not None: assert 'weight' not in kwargs, "The key 'weight' already exists." kwargs['weight'] = self.class_weight.to(cls_score.device) loss_cls = F.binary_cross_entropy_with_logits(cls_score, label, **kwargs) return loss_cls
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate bce loss with logits. Returns: torch.Tensor: The returned bce loss with logits.
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/cross_entropy_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py
Apache-2.0
def _forward(self, cls_score, label, mask, category_mask): """Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. mask (torch.Tensor): The mask of tags. 0 indicates that the category of this tag is missing in the label of the video. category_mask (torch.Tensor): The category mask. For each sample, it's a tensor with length `len(self.categories)`, denotes that if the category is labeled for this video. Returns: torch.Tensor: The returned CrossEntropy loss. """ if self.loss_type == 'all': loss_cls = F.binary_cross_entropy_with_logits( cls_score, label, reduction='none') if self.with_mask: w_loss_cls = mask * loss_cls w_loss_cls = torch.sum(w_loss_cls, dim=1) if self.reduction == 'mean': w_loss_cls = w_loss_cls / torch.sum(mask, dim=1) w_loss_cls = torch.mean(w_loss_cls) return dict(loss_cls=w_loss_cls) if self.reduction == 'sum': loss_cls = torch.sum(loss_cls, dim=-1) return dict(loss_cls=torch.mean(loss_cls)) if self.loss_type == 'individual': losses = {} loss_weights = {} for name, num, start_idx in zip(self.categories, self.category_nums, self.category_startidx): category_score = cls_score[:, start_idx:start_idx + num] category_label = label[:, start_idx:start_idx + num] category_loss = F.binary_cross_entropy_with_logits( category_score, category_label, reduction='none') if self.reduction == 'mean': category_loss = torch.mean(category_loss, dim=1) elif self.reduction == 'sum': category_loss = torch.sum(category_loss, dim=1) idx = self.categories.index(name) if self.with_mask: category_mask_i = category_mask[:, idx].reshape(-1) # there should be at least one sample which contains tags # in this category if torch.sum(category_mask_i) < 0.5: losses[f'{name}_LOSS'] = torch.tensor( .0, device=get_device()) loss_weights[f'{name}_LOSS'] = .0 continue category_loss = torch.sum(category_loss * category_mask_i) category_loss = category_loss / torch.sum(category_mask_i) else: category_loss = torch.mean(category_loss) # We name the loss of each category as 'LOSS', since we only # want to monitor them, not backward them. We will also provide # the loss used for backward in the losses dictionary losses[f'{name}_LOSS'] = category_loss loss_weights[f'{name}_LOSS'] = self.category_loss_weights[idx] loss_weight_sum = sum(loss_weights.values()) loss_weights = { k: v / loss_weight_sum for k, v in loss_weights.items() } loss_cls = sum([losses[k] * loss_weights[k] for k in losses]) losses['loss_cls'] = loss_cls # We also trace the loss weights losses.update({ k + '_weight': torch.tensor(v).to(losses[k].device) for k, v in loss_weights.items() }) # Note that the loss weights are just for reference. return losses else: raise ValueError("loss_type should be 'all' or 'individual', " f'but got {self.loss_type}')
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. mask (torch.Tensor): The mask of tags. 0 indicates that the category of this tag is missing in the label of the video. category_mask (torch.Tensor): The category mask. For each sample, it's a tensor with length `len(self.categories)`, denoting whether the category is labeled for this video. Returns: dict: The returned loss dict. For ``loss_type='all'`` it contains ``loss_cls``; for ``loss_type='individual'`` it additionally contains the per-category losses and their weights.
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/hvu_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/hvu_loss.py
Apache-2.0
def forward(ctx, pred, labels, is_positive, ohem_ratio, group_size): """Calculate OHEM hinge loss. Args: pred (torch.Tensor): Predicted completeness score. labels (torch.Tensor): Groundtruth class label. is_positive (int): Set to 1 when proposals are positive and set to -1 when proposals are incomplete. ohem_ratio (float): Ratio of hard examples. group_size (int): Number of proposals sampled per video. Returns: torch.Tensor: Returned class-wise hinge loss. """ num_samples = pred.size(0) if num_samples != len(labels): raise ValueError(f'Number of samples should be equal to that ' f'of labels, but got {num_samples} samples and ' f'{len(labels)} labels.') losses = torch.zeros(num_samples, device=pred.device) slopes = torch.zeros(num_samples, device=pred.device) for i in range(num_samples): losses[i] = max(0, 1 - is_positive * pred[i, labels[i] - 1]) slopes[i] = -is_positive if losses[i] != 0 else 0 losses = losses.view(-1, group_size).contiguous() sorted_losses, indices = torch.sort(losses, dim=1, descending=True) keep_length = int(group_size * ohem_ratio) loss = torch.zeros(1, device=pred.device) for i in range(losses.size(0)): loss += sorted_losses[i, :keep_length].sum() ctx.loss_index = indices[:, :keep_length] ctx.labels = labels ctx.slopes = slopes ctx.shape = pred.size() ctx.group_size = group_size ctx.num_groups = losses.size(0) return loss
Calculate OHEM hinge loss. Args: pred (torch.Tensor): Predicted completeness score. labels (torch.Tensor): Groundtruth class label. is_positive (int): Set to 1 when proposals are positive and set to -1 when proposals are incomplete. ohem_ratio (float): Ratio of hard examples. group_size (int): Number of proposals sampled per video. Returns: torch.Tensor: Returned class-wise hinge loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/ohem_hinge_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ohem_hinge_loss.py
Apache-2.0
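The arithmetic of the OHEM hinge forward can be reproduced in a few lines; the scores, labels and ratios below are made up, and this is only a sketch of what the autograd forward computes, not the repo's API.

import torch

pred = torch.tensor([[0.9, 0.1],
                     [0.2, 0.8],
                     [0.4, 0.6],
                     [1.2, -0.3]])
labels = torch.tensor([1, 1, 2, 1])       # 1-based labels, indexed as labels[i] - 1
is_positive, ohem_ratio, group_size = 1, 0.5, 4

# Per-proposal hinge loss on the score of the labelled class.
losses = torch.stack([
    torch.clamp(1 - is_positive * pred[i, labels[i] - 1], min=0)
    for i in range(len(labels))
])                                        # [0.1, 0.8, 0.4, 0.0]

# Only the hardest ohem_ratio fraction of each group contributes.
keep = int(group_size * ohem_ratio)       # 2
hard = losses.view(-1, group_size).sort(dim=1, descending=True)[0][:, :keep]
print(hard.sum())                         # 0.8 + 0.4 = 1.2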
def backward(ctx, grad_output): """Defines a formula for differentiating the operation with backward mode automatic differentiation.""" labels = ctx.labels slopes = ctx.slopes grad_in = torch.zeros(ctx.shape, device=ctx.slopes.device) for group in range(ctx.num_groups): for idx in ctx.loss_index[group]: loc = idx + group * ctx.group_size grad_in[loc, labels[loc] - 1] = ( slopes[loc] * grad_output.data[0]) return torch.autograd.Variable(grad_in), None, None, None, None
Defines a formula for differentiating the operation with backward mode automatic differentiation.
backward
python
open-mmlab/mmaction2
mmaction/models/losses/ohem_hinge_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ohem_hinge_loss.py
Apache-2.0
def forward(self, activity_score, completeness_score, bbox_pred, proposal_type, labels, bbox_targets, train_cfg): """Calculate Boundary Matching Network Loss. Args: activity_score (torch.Tensor): Predicted activity score. completeness_score (torch.Tensor): Predicted completeness score. bbox_pred (torch.Tensor): Predicted interval center and span of positive proposals. proposal_type (torch.Tensor): Type index slices of proposals. labels (torch.Tensor): Groundtruth class label. bbox_targets (torch.Tensor): Groundtruth center and span of positive proposals. train_cfg (dict): Config for training. Returns: dict([torch.Tensor, torch.Tensor, torch.Tensor]): (loss_activity, loss_completeness, loss_reg). Loss_activity is the activity loss, loss_completeness is the class-wise completeness loss, loss_reg is the class-wise regression loss. """ self.sampler = train_cfg.ssn.sampler self.loss_weight = train_cfg.ssn.loss_weight losses = dict() proposal_type = proposal_type.view(-1) labels = labels.view(-1) activity_indexer = ((proposal_type == 0) + (proposal_type == 2)).nonzero().squeeze(1) completeness_indexer = ((proposal_type == 0) + (proposal_type == 1)).nonzero().squeeze(1) total_ratio = ( self.sampler.positive_ratio + self.sampler.background_ratio + self.sampler.incomplete_ratio) positive_per_video = int(self.sampler.num_per_video * (self.sampler.positive_ratio / total_ratio)) background_per_video = int( self.sampler.num_per_video * (self.sampler.background_ratio / total_ratio)) incomplete_per_video = ( self.sampler.num_per_video - positive_per_video - background_per_video) losses['loss_activity'] = self.activity_loss(activity_score, labels, activity_indexer) losses['loss_completeness'] = self.completeness_loss( completeness_score, labels, completeness_indexer, positive_per_video, incomplete_per_video, ohem_ratio=positive_per_video / incomplete_per_video) losses['loss_completeness'] *= self.loss_weight.comp_loss_weight if bbox_pred is not None: regression_indexer = (proposal_type == 0).nonzero().squeeze(1) bbox_targets = bbox_targets.view(-1, 2) losses['loss_reg'] = self.classwise_regression_loss( bbox_pred, labels, bbox_targets, regression_indexer) losses['loss_reg'] *= self.loss_weight.reg_loss_weight return losses
Calculate Structured Segment Network Loss. Args: activity_score (torch.Tensor): Predicted activity score. completeness_score (torch.Tensor): Predicted completeness score. bbox_pred (torch.Tensor): Predicted interval center and span of positive proposals. proposal_type (torch.Tensor): Type index slices of proposals. labels (torch.Tensor): Groundtruth class label. bbox_targets (torch.Tensor): Groundtruth center and span of positive proposals. train_cfg (dict): Config for training. Returns: dict([torch.Tensor, torch.Tensor, torch.Tensor]): (loss_activity, loss_completeness, loss_reg). Loss_activity is the activity loss, loss_completeness is the class-wise completeness loss, loss_reg is the class-wise regression loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/ssn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ssn_loss.py
Apache-2.0
def forward(self, hidden_states: torch.Tensor):
        """forward function.

        Args:
            hidden_states (torch.Tensor): The input. Shape: [b,t,l,c]

        Returns:
            torch.Tensor: The output hidden states. Same shape as the
                input, i.e. [b,t,l,c].
        """
        b = hidden_states.shape[0]
        # Attend over the temporal axis independently for every spatial token.
        output = einops.rearrange(hidden_states, 'b t l c -> (b l) t c')
        output = self.layernorm_before(output)
        # self.attention returns a tuple; its first element is the attended output.
        output = self.attention(output)
        output = einops.rearrange(output[0], '(b l) t c -> b t l c', b=b)
        # Residual connection scaled by a learnable (zero-initialised) factor; after the
        # rearrange `output` is already a plain tensor, so no further indexing is needed.
        return hidden_states + self.drop_path(output) * self.scale
forward function. Args: hidden_states (torch.Tensor): The input. Shape: [b,t,l,c] Returns: torch.Tensor: The output hidden states with the same shape as the input.
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/beit3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py
Apache-2.0
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None ) -> torch.Tensor: """ Args: pixel_values (torch.Tensor): The input image patches. Shape: [B, T, C, H, W]. """ t = pixel_values.shape[1] pixel_values = einops.rearrange(pixel_values, 'b t c h w -> (b t) c h w') embeddings = self.patch_embeddings(pixel_values) batch_size, seq_len, _ = embeddings.size() # [(b t) l c] cls_tokens = self.cls_token.expand(batch_size, -1, -1) if bool_masked_pos is not None: mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) # replace the masked visual tokens by mask_tokens w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) embeddings = embeddings * (1 - w) + mask_tokens * w if self.prompt_tokens is not None: prompt_tokens = self.prompt_tokens.expand(batch_size, -1, -1) embeddings = torch.cat((cls_tokens, embeddings, prompt_tokens), dim=1) else: embeddings = torch.cat((cls_tokens, embeddings), dim=1) # [B*T, L, C] if self.position_embeddings is not None: embeddings = embeddings + self.position_embeddings embeddings = einops.rearrange(embeddings, '(b t) l c -> b t l c', t=t) if self.temporal_position_embeddings is not None: if t <= self.temporal_position_embeddings.shape[1]: embeddings = embeddings + \ self.temporal_position_embeddings[:, :t] else: tpe = interpolate_temporal_pos_embed( self.temporal_position_embeddings, t) embeddings = embeddings + tpe embeddings = self.dropout(embeddings) return embeddings
Args: pixel_values (torch.Tensor): The input image patches. Shape: [B, T, C, H, W]. Returns: torch.Tensor: The embeddings of shape [B, T, L, C], where L includes the prepended [CLS] token.
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/beit3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py
Apache-2.0
def forward(self, x: torch.Tensor): """forward. Args: x (torch.Tensor): input features. Shape: [bs, nframes, l, c]. l = 1 + h*w Returns: features after adapter. The same shape as input. """ if x.shape[1] == 1: # for single frame, return itself. return x shortcut = x x = self.linear1(x) cls = x[:, :, :1, :] tokens = x[:, :, 1:, :] tokens = einops.rearrange( tokens, 'b t (h w) c -> b c t h w', h=self.h).contiguous() tokens = self.conv(tokens) tokens = einops.rearrange(tokens, 'b c t h w -> b t (h w) c') x = torch.cat([cls, tokens], dim=2) # [b, t, 1+h*w, c] x = self.act(x) x = self.linear2(x) return shortcut + self.scale * self.droppath(x)
forward. Args: x (torch.Tensor): input features. Shape: [bs, nframes, l, c]. l = 1 + h*w Returns: features after adapter. The same shape as input.
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
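The einops reshapes in the adapter's forward are the fiddly part; this shape-only sketch (with made-up dimensions and the convolution step elided) shows how the spatial tokens are folded into a 5-D volume and back.

import torch
import einops

b, t, h, w, c = 2, 4, 7, 7, 8
x = torch.randn(b, t, 1 + h * w, c)          # 1 cls token + h*w patch tokens

cls, tokens = x[:, :, :1, :], x[:, :, 1:, :]
tokens = einops.rearrange(tokens, 'b t (h w) c -> b c t h w', h=h)   # [2, 8, 4, 7, 7]
# ... the 3-D convolution over (t, h, w) would run here ...
tokens = einops.rearrange(tokens, 'b c t h w -> b t (h w) c')
x = torch.cat([cls, tokens], dim=2)
print(x.shape)                               # torch.Size([2, 4, 50, 8])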
def __init__(self, input_dim=768, droppath_rate=0.1): """ Kwargs: input_dim (int): The input feature dimension. """ super().__init__() self._input_dim = input_dim self.temporal_attn = MultiheadAttention( input_dim, num_heads=input_dim // 64) self.norm = LayerNorm(input_dim, eps=1e-12) self.linear = Linear(input_dim, input_dim) self.droppath = DropPath(droppath_rate) self.scale = nn.parameter.Parameter(torch.zeros([]))
Kwargs: input_dim (int): The input feature dimension.
__init__
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]: """Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
build_inputs_with_special_tokens
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/tokenizer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/tokenizer.py
Apache-2.0
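What build_inputs_with_special_tokens assembles, spelled out with example ids; 101 and 102 are the conventional BERT [CLS]/[SEP] ids, and the other ids are placeholders.

cls_id, sep_id = 101, 102
token_ids_0 = [7592, 2088]          # e.g. "hello world"
token_ids_1 = [2129, 2024, 2017]    # e.g. "how are you"

single = [cls_id] + token_ids_0                                     # [CLS] X
pair = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]   # [CLS] A [SEP] B [SEP]
print(single)   # [101, 7592, 2088]
print(pair)     # [101, 7592, 2088, 102, 2129, 2024, 2017, 102]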
def interpolate_pos_embed_beit(state_dict, new_model): """interpolate the positional embeddings. The spatial pe is relative and temporal pe is absolute. additional temporal pe is padded with 0. Args: state_dict (dict): The state_dict. new_model (nn.Module): The created model. Returns: dict. The state_dict with updated positional embeddings. """ state_dict = interpolate_pos_relative_bias_beit( state_dict_old=state_dict, state_dict_new=new_model.state_dict(), patch_shape_new=new_model.vision_encoder.embeddings.patch_embeddings. patch_shape, ) # absolute temporal pos bias temporal_pe_key = 'vision_encoder.embeddings.temporal_position_embeddings' if temporal_pe_key in state_dict: logger = MMLogger.get_current_instance() logger.info( f'interpolate temporal positional embeddings: {temporal_pe_key}') state_dict[temporal_pe_key] = load_temp_embed_with_mismatch( temp_embed_old=state_dict[temporal_pe_key], temp_embed_new=new_model.state_dict()[temporal_pe_key], ) return state_dict
Interpolate the positional embeddings. The spatial positional embeddings are relative and the temporal ones are absolute; any additional temporal positional embeddings are padded with zeros. Args: state_dict (dict): The state_dict. new_model (nn.Module): The created model. Returns: dict: The state_dict with updated positional embeddings.
interpolate_pos_embed_beit
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
def load_temp_embed_with_mismatch(temp_embed_old, temp_embed_new, add_zero=True): """Add/Remove extra temporal_embeddings as needed. https://arxiv.org/abs/2104.00650 shows adding zero paddings works. temp_embed_old: (1, num_frames_old, 1, d) temp_embed_new: (1, num_frames_new, 1, d) add_zero: bool, if True, add zero, else, interpolate trained embeddings. """ # TODO zero pad num_frms_new = temp_embed_new.shape[1] num_frms_old = temp_embed_old.shape[1] logger = MMLogger.get_current_instance() logger.info( f'Load temporal_embeddings, lengths: {num_frms_old}-->{num_frms_new}') if num_frms_new > num_frms_old: if add_zero: temp_embed_new[:, :num_frms_old] \ = temp_embed_old # untrained embeddings are zeros. else: temp_embed_new = interpolate_temporal_pos_embed( temp_embed_old, num_frms_new) elif num_frms_new < num_frms_old: temp_embed_new = temp_embed_old[:, :num_frms_new] else: # = temp_embed_new = temp_embed_old return temp_embed_new
Add/Remove extra temporal_embeddings as needed. https://arxiv.org/abs/2104.00650 shows adding zero paddings works. temp_embed_old: (1, num_frames_old, 1, d) temp_embed_new: (1, num_frames_new, 1, d) add_zero: bool, if True, add zero, else, interpolate trained embeddings.
load_temp_embed_with_mismatch
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
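The zero-padding branch of load_temp_embed_with_mismatch amounts to copying the old rows and leaving the new ones at zero; a toy sketch with made-up sizes:

import torch

temp_embed_old = torch.randn(1, 4, 1, 8)    # trained for 4 frames
temp_embed_new = torch.zeros(1, 8, 1, 8)    # model now expects 8 frames

temp_embed_new[:, :4] = temp_embed_old      # frames 0-3 reuse the trained embeddings,
                                            # frames 4-7 stay zero (untrained)
print(temp_embed_new.abs().sum(dim=-1).squeeze())   # non-zero for 0-3, zero for 4-7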
def interpolate_pos_relative_bias_beit(state_dict_old, state_dict_new, patch_shape_new): """ Args: state_dict_old: loaded state dict state_dict_new: state dict for model with new image size patch_shape_new: new model patch_shape ref: https://github.com/microsoft/unilm/blob/master/beit/run_class_finetuning.py # noqa: E501 """ all_keys = list(state_dict_old.keys()) for key in all_keys: if 'relative_position_index' in key: state_dict_old.pop(key) if 'relative_position_bias_table' in key: rel_pos_bias = state_dict_old[key] src_num_pos, num_attn_heads = rel_pos_bias.size() dst_num_pos, _ = state_dict_new[key].size() dst_patch_shape = patch_shape_new if dst_patch_shape[0] != dst_patch_shape[1]: raise NotImplementedError() num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * ( dst_patch_shape[1] * 2 - 1) src_size = int((src_num_pos - num_extra_tokens)**0.5) dst_size = int((dst_num_pos - num_extra_tokens)**0.5) if src_size != dst_size: extra_tokens = rel_pos_bias[-num_extra_tokens:, :] rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] def geometric_progression(a, r, n): return a * (1.0 - r**n) / (1.0 - r) left, right = 1.01, 1.5 while right - left > 1e-6: q = (left + right) / 2.0 gp = geometric_progression(1, q, src_size // 2) if gp > dst_size // 2: right = q else: left = q dis = [] cur = 1 for i in range(src_size // 2): dis.append(cur) cur += q**(i + 1) r_ids = [-_ for _ in reversed(dis)] x = r_ids + [0] + dis y = r_ids + [0] + dis t = dst_size // 2.0 dx = np.arange(-t, t + 0.1, 1.0) dy = np.arange(-t, t + 0.1, 1.0) all_rel_pos_bias = [] for i in range(num_attn_heads): z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy() f = interpolate.interp2d(x, y, z, kind='cubic') all_rel_pos_bias.append( torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to( rel_pos_bias.device)) rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) state_dict_old[key] = new_rel_pos_bias return state_dict_old
Args: state_dict_old: loaded state dict state_dict_new: state dict for model with new image size patch_shape_new: new model patch_shape ref: https://github.com/microsoft/unilm/blob/master/beit/run_class_finetuning.py # noqa: E501
interpolate_pos_relative_bias_beit
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
def forward(self, inputs, data_samples, mode: str = 'loss'): """The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method doesn't handle neither back propagation nor optimizer updating, which are done in the :meth:`train_step`. Args: inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[``ActionDataSample], optional): The annotation data of every samples. Defaults to None. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor. """ if mode == 'tensor': return self.extract_feat(inputs, data_samples) elif mode == 'loss': return self.loss(inputs, data_samples) elif mode == 'predict': return self.predict(inputs, data_samples) else: raise RuntimeError(f'Invalid mode "{mode}".')
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method does not handle back propagation or optimizer updating, which are done in the :meth:`train_step`. Args: inputs (torch.Tensor): The input tensor with shape (N, C, ...) in general. data_samples (List[``ActionDataSample``], optional): The annotation data of every sample. Defaults to None. mode (str): Return what kind of value. Defaults to ``loss``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor.
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py
Apache-2.0
def encode_vision(self, image): """encode image / videos as features. Args: image (torch.Tensor): The input images. Returns: tuple. - vision_embeds (torch.Tensor): The features of all patches. Shape: [B,T,L,C]. - pooled_vision_embeds (torch.Tensor): The pooled features. Shape: [B,T,C]. """ output_dict = self.vision_encoder(image) vision_embeds = self.vision_layernorm(output_dict.last_hidden_state) pooled_vision_embeds = output_dict.pooler_output return vision_embeds, pooled_vision_embeds
encode image / videos as features. Args: image (torch.Tensor): The input images. Returns: tuple. - vision_embeds (torch.Tensor): The features of all patches. Shape: [B,T,L,C]. - pooled_vision_embeds (torch.Tensor): The pooled features. Shape: [B,T,C].
encode_vision
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py
Apache-2.0
def encode_text(self, text): """encode text. Args: text (dict): The output of huggingface's `PreTrainedTokenizer`. contains keys: - input_ids (torch.Tensor): Token ids to be fed to a model. Shape: [B,L]. - attention_mask (torch.Tensor): The mask indicate padded tokens. Shape: [B,L]. 0 is padded token. - other keys refer to "https://huggingface.co/docs/transformers/v4.21.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__". # noqa: E501 Returns: tuple. - text_embeds (torch.Tensor): The features of all tokens. Shape: [B,L,C]. - pooled_text_embeds (torch.Tensor): The pooled features. Shape: [B,C]. """ text_output = self.text_encoder( text.input_ids, attention_mask=text.attention_mask, return_dict=True, mode='text', ) text_embeds = text_output.last_hidden_state pooled_text_embeds = text_embeds[:, 0] return text_embeds, pooled_text_embeds
encode text. Args: text (dict): The output of huggingface's `PreTrainedTokenizer`. It contains the keys: - input_ids (torch.Tensor): Token ids to be fed to a model. Shape: [B,L]. - attention_mask (torch.Tensor): The mask indicating padded tokens. Shape: [B,L]. 0 marks a padded token. - other keys refer to "https://huggingface.co/docs/transformers/v4.21.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__". # noqa: E501 Returns: tuple. - text_embeds (torch.Tensor): The features of all tokens. Shape: [B,L,C]. - pooled_text_embeds (torch.Tensor): The pooled features. Shape: [B,C].
encode_text
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py
Apache-2.0
def loss( self, inputs: torch.Tensor, data_samples: Optional[List[ActionDataSample]] = None, ) -> Dict[str, torch.tensor]: """Calculate losses from a batch of inputs and data samples. Args: inputs (dict): A batch of inputs. The input tensor with of at least one modality. For image, the value is a tensor of shape (N, C, ...) in general. For text, the value is a dict of tokenized text inputs. data_samples (Optional[List[DataSample]]): The annotation data of every samples. Defaults to None. Returns: Dict[str, torch.tensor]: a dictionary of loss components of """ output = self.extract_feat(inputs, data_samples) text_embeds = output['text_embeds'] text_attn_mask = output['text_attn_mask'] image_embeds = output['image_embeds'] image_feat = output['image_feat'] text_feat = output['text_feat'] image_atts = torch.ones( image_embeds.size()[:-1], dtype=torch.long).to(self.device) # ITC Loss # B*world_size, D image_feat_all = torch.cat(dist.all_gather(image_feat)) # B*world_size, D text_feat_all = torch.cat(dist.all_gather(text_feat)) # image to text similarity # B, B*world_size sim_i2t = torch.einsum('mld,nd->mln', image_feat, text_feat_all).mean(1) / self.temp # text-image similarity # B, B*world_size sim_t2i = torch.einsum('md,nld->mln', text_feat, image_feat_all).mean(1) / self.temp rank = dist.get_rank() bs = inputs.size(0) itc_targets = torch.linspace( rank * bs, rank * bs + bs - 1, bs, dtype=int).to(self.device) itc_loss = (F.cross_entropy(sim_i2t, itc_targets) + F.cross_entropy(sim_t2i, itc_targets)) / 2 # prepare for itm output_pos = self.text_encoder( encoder_embeds=text_embeds, attention_mask=text_attn_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True, mode='fusion', ) idx = torch.tensor([i.gt_video_id for i in data_samples]).view(-1, 1) bs = idx.size(0) if self.negative_all_rank: idxs = torch.cat(dist.all_gather(idx)) image_feat_world = torch.cat(dist.all_gather(image_feat)) text_feat_world = torch.cat(dist.all_gather(text_feat)) att_mask_world = torch.cat(dist.all_gather(text_attn_mask)) text_embeds_world = torch.cat(all_gather_with_grad(text_embeds)) image_embeds_world = torch.cat(all_gather_with_grad(image_embeds)) else: idxs = idx image_feat_world = image_feat.detach() text_feat_world = text_feat.detach() image_embeds_world = image_embeds text_embeds_world = text_embeds att_mask_world = text_attn_mask with torch.no_grad(): # compute sample similarity sim_i2t = torch.einsum('mld,nd->mln', image_feat, text_feat_world).mean(1) / self.temp sim_t2i = torch.einsum('md,nld->mln', text_feat, image_feat_world).mean(1) / self.temp mask = torch.eq(idx, idxs.t()).to(self.device) weights_i2t = F.softmax(sim_i2t + 1e-4, dim=1) weights_i2t.masked_fill_(mask, 0) weights_t2i = F.softmax(sim_t2i + 1e-4, dim=1) weights_t2i.masked_fill_(mask, 0) # select a negative image for each text neg_idx = torch.multinomial(weights_t2i, 1).squeeze() image_embeds_neg = image_embeds_world[neg_idx] # select a negative text for each image neg_idx = torch.multinomial(weights_i2t, 1).squeeze() text_embeds_neg = text_embeds_world[neg_idx] text_atts_neg = att_mask_world[neg_idx] text_embeds_all = torch.cat([text_embeds, text_embeds_neg], dim=0) text_atts_all = torch.cat([text_attn_mask, text_atts_neg], dim=0) image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0) image_atts_all = torch.cat([image_atts, image_atts], dim=0) output_neg = self.text_encoder( encoder_embeds=text_embeds_all, attention_mask=text_atts_all, encoder_hidden_states=image_embeds_all, 
encoder_attention_mask=image_atts_all, return_dict=True, mode='fusion', ) vl_embeddings = torch.cat( [ output_pos.last_hidden_state[:, 0, :], output_neg.last_hidden_state[:, 0, :], ], dim=0, ) itm_targets = torch.ones((3 * bs, ), dtype=torch.long, device=inputs.device) itm_targets[bs:] = 0 itm_logit = self.itm_head(vl_embeddings) itm_loss = F.cross_entropy(itm_logit, itm_targets) return dict(itc_loss=itc_loss, itm_loss=itm_loss)
Calculate losses from a batch of inputs and data samples. Args: inputs (dict): A batch of inputs containing at least one modality. For image, the value is a tensor of shape (N, C, ...) in general. For text, the value is a dict of tokenized text inputs. data_samples (Optional[List[DataSample]]): The annotation data of every sample. Defaults to None. Returns: Dict[str, torch.tensor]: a dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
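The cross-GPU contrastive targets in the ITC loss are just rank-offset indices into the gathered features; a sketch with a made-up rank and batch size (the repo builds the same indices with torch.linspace, arange gives identical values):

import torch

rank, bs = 2, 8                              # hypothetical GPU rank and per-GPU batch
# Gathered features have world_size * bs rows; the positive text for local sample i
# sits at row rank * bs + i of the gathered tensor.
itc_targets = torch.arange(rank * bs, rank * bs + bs)
print(itc_targets)                           # tensor([16, 17, 18, 19, 20, 21, 22, 23])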
def extract_feat( self, images: torch.Tensor = None, data_samples: List[ActionDataSample] = None, return_texts=True, ) -> Dict[str, torch.Tensor]: """Extract features from the input dict. Args: images (tensor, optional): The images to extract features. Defaults to None. data_samples (list, optional): The data samples containing texts to extract features. Defaults to None. return_texts (bool): Whether to return the tokenized text and the corresponding attention masks. Defaults to True. Returns: Tuple[torch.Tensor]: The output features. If multimodal_backbone is not exist, tuple of torch.Tensor will be returned. """ if data_samples is not None: texts = self.preprocess_text(data_samples) else: texts = None assert images is not None or texts is not None, \ 'At least single modality should be passed as inputs.' results = {} if texts is not None and return_texts: results.update({ 'text_ids': texts.input_ids, 'text_attn_mask': texts.attention_mask, }) # extract image features if images is not None: image_embeds, pooled_image_embeds = self.encode_vision(images) # concat temporal embeds image_embeds = rearrange(image_embeds, 'b t l c -> b (t l) c').contiguous() results['image_embeds'] = image_embeds results['image_feat'] = F.normalize( self.vision_proj(pooled_image_embeds), dim=-1) # extract text features if texts is not None: texts_output = self.text_encoder( texts.input_ids, attention_mask=texts.attention_mask, return_dict=True, mode='text') text_embeds = texts_output.last_hidden_state pooled_text_feat = text_embeds[:, 0] results['text_embeds'] = text_embeds results['text_feat'] = F.normalize( self.text_proj(pooled_text_feat), dim=-1) return results
Extract features from the input dict. Args: images (tensor, optional): The images to extract features. Defaults to None. data_samples (list, optional): The data samples containing texts to extract features. Defaults to None. return_texts (bool): Whether to return the tokenized text and the corresponding attention masks. Defaults to True. Returns: Tuple[torch.Tensor]: The output features. If multimodal_backbone is not exist, tuple of torch.Tensor will be returned.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
def compute_score_matrix_i2t(self, img_feats, img_embeds, text_feats, text_embeds, text_atts): """Compare the score matrix for image-to-text retrieval. Every image should compare to all the text features. Args: img_feats (torch.Tensor): The input img feats tensor with shape (M, C). M stands for numbers of samples on a single GPU. img_embeds (torch.Tensor): The input img embeds tensor with shape (M, C). M stands for numbers of samples on a single GPU. text_feats (torch.Tensor): The input text feats tensor with shape (N, C). N stands for numbers of all samples on all GPUs. text_embeds (torch.Tensor): The input tensor with shape (N, C). text_atts (torch.Tensor): The input tensor with shape (N, C). Returns: torch.Tensor: Score matrix of image-to-text retrieval. """ # compute i2t sim matrix sim_matrix_i2t = torch.einsum('mld,nd->mln', img_feats, text_feats).mean(1) if self.fast_match: return sim_matrix_i2t score_matrix_i2t = torch.full((img_feats.size(0), text_feats.size(0)), -100.0).to(self.device) for i in track_on_main_process( range(img_feats.size(0)), 'Compute I2T scores...'): sims = sim_matrix_i2t[i] topk_sim, topk_idx = sims.topk(k=self.topk, dim=0) topk_bz = 32 encoder_output = img_embeds[i].repeat(topk_bz, 1, 1) encoder_att = torch.ones( encoder_output.size()[:-1], dtype=torch.long).to(self.device) for j in range(0, self.topk // topk_bz): batch_topk = topk_idx[j * topk_bz:(j + 1) * topk_bz] output = self.text_encoder( encoder_embeds=text_embeds[batch_topk], attention_mask=text_atts[batch_topk], encoder_hidden_states=encoder_output, encoder_attention_mask=encoder_att, return_dict=True, mode='fusion') score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1] score_matrix_i2t[i, batch_topk] = score return score_matrix_i2t
Compute the score matrix for image-to-text retrieval. Every image is compared with all the text features. Args: img_feats (torch.Tensor): The input img feats tensor with shape (M, C). M stands for the number of samples on a single GPU. img_embeds (torch.Tensor): The input img embeds tensor with shape (M, C). M stands for the number of samples on a single GPU. text_feats (torch.Tensor): The input text feats tensor with shape (N, C). N stands for the number of all samples on all GPUs. text_embeds (torch.Tensor): The input tensor with shape (N, C). text_atts (torch.Tensor): The input tensor with shape (N, C). Returns: torch.Tensor: Score matrix of image-to-text retrieval.
compute_score_matrix_i2t
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
def compute_score_matrix_t2i(self, img_feats, img_embeds, text_feats, text_embeds, text_atts): """Compare the score matrix for text-to-image retrieval. Every text should compare to all the image features. Args: img_feats (torch.Tensor): The input img feats tensor with shape (M, C). M stands for numbers of samples on a single GPU. img_embeds (torch.Tensor): The input img embeds tensor with shape (M, C). M stands for numbers of samples on a single GPU. text_feats (torch.Tensor): The input text feats tensor with shape (N, C). N stands for numbers of all samples on all GPUs. text_embeds (torch.Tensor): The input tensor with shape (M, C). text_atts (torch.Tensor): The input tensor with shape (M, C). Returns: torch.Tensor: Score matrix of text-to-image retrieval. """ # compute t2i sim matrix sim_matrix_t2i = torch.einsum('md,nld->mln', text_feats, img_feats).mean(1) if self.fast_match: return sim_matrix_t2i score_matrix_t2i = torch.full((text_feats.size(0), img_feats.size(0)), -100.0).to(self.device) for i in track_on_main_process( range(text_feats.size(0)), 'Compute T2I scores...'): sims = sim_matrix_t2i[i] topk_sim, topk_idx = sims.topk(k=self.topk, dim=0) topk_bz = 32 for j in range(0, self.topk // topk_bz): batch_topk = topk_idx[j * topk_bz:(j + 1) * topk_bz] encoder_output = img_embeds[batch_topk] encoder_att = torch.ones( encoder_output.size()[:-1], dtype=torch.long).to(self.device) output = self.text_encoder( encoder_embeds=text_embeds[i].repeat(topk_bz, 1, 1), attention_mask=text_atts[i].repeat(topk_bz, 1), encoder_hidden_states=encoder_output, encoder_attention_mask=encoder_att, return_dict=True, mode='fusion') score = self.itm_head(output.last_hidden_state[:, 0, :])[:, 1] score_matrix_t2i[i, batch_topk] = score return score_matrix_t2i
Compute the score matrix for text-to-image retrieval. Every text is compared against all image features.

    Args:
        img_feats (torch.Tensor): The input image feature tensor with shape
            (M, C), where M is the number of samples on a single GPU.
        img_embeds (torch.Tensor): The input image embedding tensor with shape
            (M, C), where M is the number of samples on a single GPU.
        text_feats (torch.Tensor): The input text feature tensor with shape
            (N, C), where N is the number of samples gathered from all GPUs.
        text_embeds (torch.Tensor): The input tensor with shape (M, C).
        text_atts (torch.Tensor): The input tensor with shape (M, C).

    Returns:
        torch.Tensor: Score matrix of text-to-image retrieval.
compute_score_matrix_t2i
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
def _get_predictions(self, result: torch.Tensor, data_samples: List[ActionDataSample], mode: str = 'i2t'): """Post-process the output of retriever. Args: result (torch.Tensor): Score matrix of single retrieve, either from image or text. data_samples (List[ActionDataSample], optional): The annotation data of every samples. mode (str): Retrieve mode, either `i2t` for image to text, or `t2i` text to image. Defaults to `i2t`. Returns: List[ActionDataSample]: the raw data_samples with the predicted results. """ # create data sample if not exists if data_samples is None: data_samples = [ActionDataSample() for _ in range(result.size(0))] elif mode == 't2i': # Process data samples to align with the num of texts. new_data_samples = [] for sample in data_samples: if isinstance(sample.text, (list, tuple)): texts = sample.text else: texts = [sample.text] for i, text in enumerate(texts): new_sample = ActionDataSample(text=text) if 'gt_video_id' in sample: new_sample.gt_label = sample.gt_video_id[i] new_data_samples.append(new_sample) assert len(new_data_samples) == result.size(0) data_samples = new_data_samples elif mode == 'i2t': for sample in data_samples: if 'gt_text_id' in sample: sample.gt_label = sample.gt_text_id else: raise ValueError(f'Type {mode} is not supported.') for data_sample, score in zip(data_samples, result): idx = score.argmax(keepdim=True).detach() data_sample.set_pred_score(score) data_sample.set_pred_label(idx) return data_samples
Post-process the output of the retriever.

    Args:
        result (torch.Tensor): Score matrix of a single retrieval direction,
            either image-to-text or text-to-image.
        data_samples (List[ActionDataSample], optional): The annotation
            data of every sample.
        mode (str): Retrieval mode, either `i2t` for image-to-text or `t2i`
            for text-to-image. Defaults to `i2t`.

    Returns:
        List[ActionDataSample]: The raw data_samples with the predicted
            results attached.
_get_predictions
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
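Illustrative sketch (not part of the repository): the `t2i` branch above flattens per-sample caption lists so that every caption becomes its own sample. The same bookkeeping with plain dicts instead of `ActionDataSample` (field names chosen for illustration):

def flatten_text_samples(samples):
    """Expand each sample so that every caption gets its own entry."""
    flat = []
    for sample in samples:
        texts = sample['text']
        if not isinstance(texts, (list, tuple)):
            texts = [texts]
        for i, text in enumerate(texts):
            entry = {'text': text}
            if 'gt_video_id' in sample:
                entry['gt_label'] = sample['gt_video_id'][i]
            flat.append(entry)
    return flat


print(flatten_text_samples([{'text': ['a cat', 'a dog'], 'gt_video_id': [3, 3]}]))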
def predict(self, inputs, data_samples, **kwargs): """Predict captions from a batch of inputs. Args: images (torch.Tensor): The input images tensor with shape (N, C, ...) in general. data_samples (List[DataSample], optional): The annotation data of every samples. Defaults to None. **kwargs: Other keyword arguments accepted by the ``predict`` Returns: List[ActionDataSample]: Return list of data samples. """ num_options_per_q = len(data_samples[0].caption_options) for sample in data_samples: sample.text = sample.caption_options output = self.extract_feat(inputs, data_samples) text_embeds = output['text_embeds'] text_attn_mask = output['text_attn_mask'] image_embeds = output['image_embeds'] image_feat = output['image_feat'] text_feat = output['text_feat'] # compute similarity between vision feat and caption feat text_feat = rearrange( text_feat, '(b n) c -> b c n', n=num_options_per_q) sim = torch.matmul(image_feat.mean(1, keepdim=True), text_feat).squeeze(1) / self.temp sim = F.softmax(sim, dim=1).flatten() # cross-modal encode encoder_output = image_embeds.repeat_interleave( num_options_per_q, dim=0) image_atts = torch.ones( encoder_output.size()[:-1], dtype=torch.long).to(inputs.device) output = self.text_encoder( encoder_embeds=text_embeds, attention_mask=text_attn_mask, encoder_hidden_states=encoder_output, encoder_attention_mask=image_atts, return_dict=True, mode='fusion', ) itm_embeds = output.last_hidden_state[:, 0] # [CLS] itm_score = F.softmax(self.itm_head(itm_embeds), dim=1)[:, 1] # [bs*5] score = itm_score * self.score_weight + sim * self.similarity_weight pred_answers = score.view(-1, num_options_per_q).max(1)[1].cpu() # assemble predictions ensemble_scores = score.view(-1, num_options_per_q).cpu() # (bsz, 5) out_data_samples = [] for data_sample, ensemble_score, pred_ans in \ zip(data_samples, ensemble_scores, pred_answers): data_sample.pred_label = pred_ans.item() data_sample.score = ensemble_score.numpy() out_data_samples.append(data_sample) return out_data_samples
Predict the best-matching caption option for a batch of inputs.

    Args:
        inputs (torch.Tensor): The input images tensor with shape
            (N, C, ...) in general.
        data_samples (List[DataSample], optional): The annotation
            data of every sample. Defaults to None.
        **kwargs: Other keyword arguments accepted by the ``predict`` method.

    Returns:
        List[ActionDataSample]: The data samples with predicted labels and
            scores attached.
predict
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret_mc.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret_mc.py
Apache-2.0
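Illustrative sketch (not part of the repository): the final scoring above mixes the contrastive similarity with the ITM score and takes an argmax per question. With made-up weights and tensor sizes:

import torch
import torch.nn.functional as F

num_questions, num_options = 4, 5
sim = F.softmax(torch.randn(num_questions, num_options), dim=1).flatten()
itm_score = torch.rand(num_questions * num_options)

score_weight, similarity_weight = 0.7, 0.3  # illustrative values
score = itm_score * score_weight + sim * similarity_weight
pred_answers = score.view(-1, num_options).max(1)[1]  # chosen option per question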
def init_weights(self) -> None:
        """Default weight initialization: Xavier-uniform for Conv3d layers and
        constant for BatchNorm3d layers; also initializes the auxiliary head
        if present."""
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                xavier_init(m, distribution='uniform')
            if isinstance(m, nn.BatchNorm3d):
                constant_init(m, 1)

        if self.aux_head is not None:
            self.aux_head.init_weights()
Default weight initialization: Xavier-uniform for Conv3d layers and constant for BatchNorm3d layers; also initializes the auxiliary head if present.
init_weights
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def loss(self, inputs: torch.Tensor, data_samples: SampleList, **kwargs) -> dict: """Calculate losses from a batch of inputs and data samples. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. data_samples (List[``ActionDataSample``]): The batch data samples. It usually includes information such as ``gt_label``. Returns: dict: A dictionary of loss components. """ feats, loss_kwargs = \ self.extract_feat(inputs, data_samples=data_samples) # loss_aux will be a empty dict if `self.with_neck` is False. loss_aux = loss_kwargs.get('loss_aux', dict()) loss_cls = self.cls_head.loss(feats, data_samples, **loss_kwargs) losses = merge_dict(loss_cls, loss_aux) return losses
Calculate losses from a batch of inputs and data samples. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. data_samples (List[``ActionDataSample``]): The batch data samples. It usually includes information such as ``gt_label``. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def predict(self, inputs: torch.Tensor, data_samples: SampleList, **kwargs) -> SampleList: """Predict results from a batch of inputs and data samples with post- processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. data_samples (List[``ActionDataSample``]): The batch data samples. It usually includes information such as ``gt_label``. Returns: List[``ActionDataSample``]: Return the recognition results. The returns value is ``ActionDataSample``, which usually contains ``pred_scores``. And the ``pred_scores`` usually contains following keys. - item (torch.Tensor): Classification scores, has a shape (num_classes, ) """ feats, predict_kwargs = self.extract_feat(inputs, test_mode=True) predictions = self.cls_head.predict(feats, data_samples, **predict_kwargs) return predictions
Predict results from a batch of inputs and data samples with post-processing.

    Args:
        inputs (torch.Tensor): Raw inputs of the recognizer. These should
            usually be mean centered and std scaled.
        data_samples (List[``ActionDataSample``]): The batch
            data samples. It usually includes information such
            as ``gt_label``.

    Returns:
        List[``ActionDataSample``]: The recognition results. Each returned
            ``ActionDataSample`` usually contains ``pred_scores``, and the
            ``pred_scores`` usually contains the following key.

            - item (torch.Tensor): Classification scores, has a shape
                (num_classes, )
predict
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def _forward(self, inputs: torch.Tensor, stage: str = 'backbone', **kwargs) -> ForwardResults: """Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. stage (str): Which stage to output the features. Returns: Union[tuple, torch.Tensor]: Features from ``backbone`` or ``neck`` or ``head`` forward. """ feats, _ = self.extract_feat(inputs, stage=stage) return feats
Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. stage (str): Which stage to output the features. Returns: Union[tuple, torch.Tensor]: Features from ``backbone`` or ``neck`` or ``head`` forward.
_forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def extract_feat(self, inputs: torch.Tensor, stage: str = 'neck', data_samples: SampleList = None, test_mode: bool = False) -> tuple: """Extract features of different stages. Args: inputs (Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``neck``. data_samples (List[:obj:`ActionDataSample`]): Action data samples, which are only needed in training. Defaults to None. test_mode: (bool): Whether in test mode. Defaults to False. Returns: Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. These keys are usually included: ``num_segs``, ``fcn_test``, ``loss_aux``. """ # Record the kwargs required by `loss` and `predict`. loss_predict_kwargs = dict() num_segs = inputs.shape[1] loss_predict_kwargs['num_segs'] = num_segs # [N, num_crops * num_segs, C, H, W] -> # [N * num_crops * num_segs, C, H, W] # `num_crops` is calculated by: # 1) `twice_sample` in `SampleFrames` # 2) `num_sample_positions` in `DenseSampleFrames` # 3) `ThreeCrop/TenCrop` in `test_pipeline` # 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1` inputs = inputs.view((-1, ) + inputs.shape[2:]) def forward_once(batch_imgs): # Extract features through backbone. if (hasattr(self.backbone, 'features') and self.backbone_from == 'torchvision'): x = self.backbone.features(batch_imgs) elif self.backbone_from == 'timm': x = self.backbone.forward_features(batch_imgs) elif self.backbone_from in ['mmcls', 'mmpretrain']: x = self.backbone(batch_imgs) if isinstance(x, tuple): assert len(x) == 1 x = x[0] else: x = self.backbone(batch_imgs) if self.backbone_from in ['torchvision', 'timm']: if not self.feature_shape: # Transformer-based feature shape: B x L x C. if len(x.shape) == 3: self.feature_shape = 'NLC' # Resnet-based feature shape: B x C x Hs x Ws. elif len(x.shape) == 4: self.feature_shape = 'NCHW' if self.feature_shape == 'NHWC': x = nn.AdaptiveAvgPool2d(1)(x.permute(0, 3, 1, 2)) # B x C x 1 x 1 elif self.feature_shape == 'NCHW': x = nn.AdaptiveAvgPool2d(1)(x) # B x C x 1 x 1 elif self.feature_shape == 'NLC': x = nn.AdaptiveAvgPool1d(1)(x.transpose(1, 2)) # B x C x 1 x = x.reshape((x.shape[0], -1)) # B x C x = x.reshape(x.shape + (1, 1)) # B x C x 1 x 1 return x # Check settings of `fcn_test`. 
fcn_test = False if test_mode: if self.test_cfg is not None and self.test_cfg.get( 'fcn_test', False): fcn_test = True num_segs = self.test_cfg.get('num_segs', self.backbone.num_segments) loss_predict_kwargs['fcn_test'] = fcn_test # inference with batch size of `max_testing_views` if set if self.test_cfg is not None and self.test_cfg.get( 'max_testing_views', False): max_testing_views = self.test_cfg.get('max_testing_views') assert isinstance(max_testing_views, int) # backbone specify num_segments num_segments = self.backbone.get('num_segments') if num_segments is not None: assert max_testing_views % num_segments == 0, \ 'make sure that max_testing_views is a multiple of ' \ 'num_segments, but got {max_testing_views} and '\ '{num_segments}' total_views = inputs.shape[0] view_ptr = 0 feats = [] while view_ptr < total_views: batch_imgs = inputs[view_ptr:view_ptr + max_testing_views] feat = forward_once(batch_imgs) if self.with_neck: feat, _ = self.neck(feat) feats.append(feat) view_ptr += max_testing_views def recursively_cat(feats): # recursively traverse feats until it's a tensor, # then concat out_feats = [] for e_idx, elem in enumerate(feats[0]): batch_elem = [feat[e_idx] for feat in feats] if not isinstance(elem, torch.Tensor): batch_elem = recursively_cat(batch_elem) else: batch_elem = torch.cat(batch_elem) out_feats.append(batch_elem) return tuple(out_feats) if isinstance(feats[0], tuple): x = recursively_cat(feats) else: x = torch.cat(feats) else: x = forward_once(inputs) else: x = forward_once(inputs) # Return features extracted through backbone. if stage == 'backbone': return x, loss_predict_kwargs loss_aux = dict() if self.with_neck: # x is a tuple with multiple feature maps. x = [ each.reshape((-1, num_segs) + each.shape[1:]).transpose(1, 2).contiguous() for each in x ] x, loss_aux = self.neck(x, data_samples=data_samples) if not fcn_test: x = x.squeeze(2) loss_predict_kwargs['num_segs'] = 1 elif fcn_test: # full convolution (fcn) testing when no neck # [N * num_crops * num_segs, C', H', W'] -> # [N * num_crops, C', num_segs, H', W'] x = x.reshape((-1, num_segs) + x.shape[1:]).transpose(1, 2).contiguous() loss_predict_kwargs['loss_aux'] = loss_aux # Return features extracted through neck. if stage == 'neck': return x, loss_predict_kwargs # Return raw logits through head. if self.with_cls_head and stage == 'head': # [N * num_crops, num_classes] x = self.cls_head(x, **loss_predict_kwargs) return x, loss_predict_kwargs
Extract features of different stages.

    Args:
        inputs (Tensor): The input data.
        stage (str): Which stage to output the feature.
            Defaults to ``'neck'``.
        data_samples (List[:obj:`ActionDataSample`]): Action data
            samples, which are only needed in training. Defaults to None.
        test_mode (bool): Whether in test mode. Defaults to False.

    Returns:
        Tensor: The extracted features.
        dict: A dict recording the kwargs for downstream
            pipeline. These keys are usually included:
            ``num_segs``, ``fcn_test``, ``loss_aux``.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer2d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer2d.py
Apache-2.0
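Illustrative sketch (not part of the repository): the reshapes in the 2D recognizer fold every crop and segment into the batch dimension before the backbone, then fold the segment dimension back out before a temporal neck. With toy shapes:

import torch

N, num_crops, num_segs, C, H, W = 2, 3, 8, 3, 224, 224
inputs = torch.randn(N, num_crops * num_segs, C, H, W)

# [N, num_crops * num_segs, C, H, W] -> [N * num_crops * num_segs, C, H, W]
flat = inputs.view((-1, ) + inputs.shape[2:])

# Pretend backbone output, then fold segments back out for a temporal neck:
feat = torch.randn(flat.shape[0], 256, 7, 7)
folded = feat.reshape((-1, num_segs) + feat.shape[1:]).transpose(1, 2).contiguous()
print(folded.shape)  # [N * num_crops, C', num_segs, H', W'] == [6, 256, 8, 7, 7]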
def extract_feat(self, inputs: Tensor, stage: str = 'neck', data_samples: OptSampleList = None, test_mode: bool = False) -> tuple: """Extract features of different stages. Args: inputs (torch.Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``'neck'``. data_samples (list[:obj:`ActionDataSample`], optional): Action data samples, which are only needed in training. Defaults to None. test_mode (bool): Whether in test mode. Defaults to False. Returns: torch.Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. These keys are usually included: ``loss_aux``. """ # Record the kwargs required by `loss` and `predict` loss_predict_kwargs = dict() num_segs = inputs.shape[1] # [N, num_crops, C, T, H, W] -> # [N * num_crops, C, T, H, W] # `num_crops` is calculated by: # 1) `twice_sample` in `SampleFrames` # 2) `num_sample_positions` in `DenseSampleFrames` # 3) `ThreeCrop/TenCrop` in `test_pipeline` # 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1` inputs = inputs.view((-1, ) + inputs.shape[2:]) # Check settings of test if test_mode: if self.test_cfg is not None: loss_predict_kwargs['fcn_test'] = self.test_cfg.get( 'fcn_test', False) if self.test_cfg is not None and self.test_cfg.get( 'max_testing_views', False): max_testing_views = self.test_cfg.get('max_testing_views') assert isinstance(max_testing_views, int) total_views = inputs.shape[0] assert num_segs == total_views, ( 'max_testing_views is only compatible ' 'with batch_size == 1') view_ptr = 0 feats = [] while view_ptr < total_views: batch_imgs = inputs[view_ptr:view_ptr + max_testing_views] feat = self.backbone(batch_imgs) if self.with_neck: feat, _ = self.neck(feat) feats.append(feat) view_ptr += max_testing_views def recursively_cat(feats): # recursively traverse feats until it's a tensor, # then concat out_feats = [] for e_idx, elem in enumerate(feats[0]): batch_elem = [feat[e_idx] for feat in feats] if not isinstance(elem, torch.Tensor): batch_elem = recursively_cat(batch_elem) else: batch_elem = torch.cat(batch_elem) out_feats.append(batch_elem) return tuple(out_feats) if isinstance(feats[0], tuple): x = recursively_cat(feats) else: x = torch.cat(feats) else: x = self.backbone(inputs) if self.with_neck: x, _ = self.neck(x) return x, loss_predict_kwargs else: # Return features extracted through backbone x = self.backbone(inputs) if stage == 'backbone': return x, loss_predict_kwargs loss_aux = dict() if self.with_neck: x, loss_aux = self.neck(x, data_samples=data_samples) # Return features extracted through neck loss_predict_kwargs['loss_aux'] = loss_aux if stage == 'neck': return x, loss_predict_kwargs # Return raw logits through head. if self.with_cls_head and stage == 'head': x = self.cls_head(x, **loss_predict_kwargs) return x, loss_predict_kwargs
Extract features of different stages. Args: inputs (torch.Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``'neck'``. data_samples (list[:obj:`ActionDataSample`], optional): Action data samples, which are only needed in training. Defaults to None. test_mode (bool): Whether in test mode. Defaults to False. Returns: torch.Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. These keys are usually included: ``loss_aux``.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer3d.py
Apache-2.0
def extract_feat(self, inputs: Dict[str, torch.Tensor], stage: str = 'backbone', data_samples: OptSampleList = None, test_mode: bool = False) -> Tuple: """Extract features. Args: inputs (dict[str, torch.Tensor]): The multi-modal input data. stage (str): Which stage to output the feature. Defaults to ``'backbone'``. data_samples (list[:obj:`ActionDataSample`], optional): Action data samples, which are only needed in training. Defaults to None. test_mode (bool): Whether in test mode. Defaults to False. Returns: tuple[torch.Tensor]: The extracted features. dict: A dict recording the kwargs for downstream pipeline. """ # [N, num_views, C, T, H, W] -> # [N * num_views, C, T, H, W] for m, m_data in inputs.items(): m_data = m_data.reshape((-1, ) + m_data.shape[2:]) inputs[m] = m_data # Record the kwargs required by `loss` and `predict` loss_predict_kwargs = dict() x = self.backbone(**inputs) if stage == 'backbone': return x, loss_predict_kwargs if self.with_cls_head and stage == 'head': x = self.cls_head(x, **loss_predict_kwargs) return x, loss_predict_kwargs
Extract features. Args: inputs (dict[str, torch.Tensor]): The multi-modal input data. stage (str): Which stage to output the feature. Defaults to ``'backbone'``. data_samples (list[:obj:`ActionDataSample`], optional): Action data samples, which are only needed in training. Defaults to None. test_mode (bool): Whether in test mode. Defaults to False. Returns: tuple[torch.Tensor]: The extracted features. dict: A dict recording the kwargs for downstream pipeline.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer3d_mm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer3d_mm.py
Apache-2.0
def extract_feat(self, batch_inputs: Tensor, stage: str = 'backbone', **kwargs) -> tuple: """Extract features of different stages. Args: batch_inputs (Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``backbone``. Returns: Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. This will be an empty dict in audio recognizer. """ # Record the kwargs required by `loss` and `predict` loss_predict_kwargs = dict() batch_inputs = batch_inputs.view((-1, ) + batch_inputs.shape[2:]) x = self.backbone(batch_inputs) if stage == 'backbone': return x, loss_predict_kwargs if self.with_cls_head and stage == 'head': x = self.cls_head(x, **loss_predict_kwargs) return x, loss_predict_kwargs
Extract features of different stages. Args: batch_inputs (Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``backbone``. Returns: Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. This will be an empty dict in audio recognizer.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_audio.py
Apache-2.0
def extract_feat(self, inputs: torch.Tensor, stage: str = 'backbone', **kwargs) -> Tuple: """Extract features at the given stage. Args: inputs (torch.Tensor): The input skeleton with shape of `(B, num_clips, num_person, clip_len, num_joints, 3 or 2)`. stage (str): The stage to output the features. Defaults to ``'backbone'``. Returns: tuple: THe extracted features and a dict recording the kwargs for downstream pipeline, which is an empty dict for the GCN-based recognizer. """ # Record the kwargs required by `loss` and `predict` loss_predict_kwargs = dict() bs, nc = inputs.shape[:2] inputs = inputs.reshape((bs * nc, ) + inputs.shape[2:]) x = self.backbone(inputs) if stage == 'backbone': return x, loss_predict_kwargs if self.with_cls_head and stage == 'head': x = self.cls_head(x, **loss_predict_kwargs) return x, loss_predict_kwargs
Extract features at the given stage.

    Args:
        inputs (torch.Tensor): The input skeleton with shape of
            `(B, num_clips, num_person, clip_len, num_joints, 3 or 2)`.
        stage (str): The stage to output the features. Defaults to
            ``'backbone'``.

    Returns:
        tuple: The extracted features and a dict recording the kwargs
            for downstream pipeline, which is an empty dict for the
            GCN-based recognizer.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_gcn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_gcn.py
Apache-2.0
def forward(self, *data_samples, mode: str, **kwargs) -> ForwardResults: """The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictions, which are fully processed to a list of :obj:`ActionDataSample`. - ``loss``: Forward and return a dict of losses according to the given inputs and data samples. Note that this method doesn't handle neither back propagation nor optimizer updating, which are done in the :meth:`train_step`. Args: data_samples: should be a sequence of ``SampleList`` if ``mode="predict"`` or ``mode="loss"``. Each ``SampleList`` is the annotation data of one data source. It should be a single torch tensor if ``mode="tensor"``. mode (str): Return what kind of value. Defaults to ``tensor``. Returns: The return type depends on ``mode``. - If ``mode="tensor"``, return a tensor or a tuple of tensor. - If ``mode="predict"``, return a list of ``ActionDataSample``. - If ``mode="loss"``, return a dict of tensor. """ if mode == 'loss' or mode == 'predict': if mode == 'loss': return self.loss(data_samples) return self.predict(data_samples) elif mode == 'tensor': assert isinstance(data_samples, torch.Tensor) data_ndim = data_samples.ndim if data_ndim not in [4, 5]: info = f'Input is a {data_ndim}D tensor. ' info += 'Only 4D (BCHW) or 5D (BCTHW) tensors are supported!' raise ValueError(info) return self._forward(data_samples, **kwargs)
The unified entry for a forward process in both training and test.

    The method should accept three modes:

    - ``tensor``: Forward the whole network and return a tensor or a tuple of
      tensors without any post-processing, same as a common nn.Module.
    - ``predict``: Forward and return the predictions, which are fully
      processed to a list of :obj:`ActionDataSample`.
    - ``loss``: Forward and return a dict of losses according to the given
      inputs and data samples.

    Note that this method does not handle back-propagation or optimizer
    updating; those are done in :meth:`train_step`.

    Args:
        data_samples: Should be a sequence of ``SampleList`` if
            ``mode="predict"`` or ``mode="loss"``. Each ``SampleList`` is the
            annotation data of one data source. It should be a single torch
            tensor if ``mode="tensor"``.
        mode (str): The kind of value to return. Defaults to ``tensor``.

    Returns:
        The return type depends on ``mode``.

        - If ``mode="tensor"``, return a tensor or a tuple of tensors.
        - If ``mode="predict"``, return a list of ``ActionDataSample``.
        - If ``mode="loss"``, return a dict of tensors.
forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
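Illustrative sketch (not part of the repository): the three-mode dispatch described above, reduced to a toy module so it runs stand-alone; labels and layer sizes are made up.

import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyRecognizer(nn.Module):

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(16, 4)

    def forward(self, x, mode='tensor'):
        logits = self.fc(x)
        if mode == 'tensor':
            return logits
        if mode == 'predict':
            return logits.softmax(dim=-1).argmax(dim=-1)
        if mode == 'loss':
            target = torch.zeros(x.size(0), dtype=torch.long)  # dummy labels
            return {'loss_cls': F.cross_entropy(logits, target)}
        raise ValueError(f'Unsupported mode: {mode}')


model = ToyRecognizer()
print(model(torch.randn(2, 16), mode='loss'))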
def loss(self, data_samples: Sequence[SampleList]) -> dict: """Calculate losses from a batch of inputs and data samples. Args: data_samples (Sequence[SampleList]): a sequence of SampleList. Each SampleList contains data samples from the same data source. Returns: dict: A dictionary of loss components. """ loss_dict = {} for idx, data in enumerate(data_samples): inputs, data_samples = data['inputs'], data['data_samples'] feats = self.extract_feat(inputs) loss_cls = self.cls_head.loss(feats, data_samples) for key in loss_cls: loss_dict[key + f'_{idx}'] = loss_cls[key] return loss_dict
Calculate losses from a batch of inputs and data samples. Args: data_samples (Sequence[SampleList]): a sequence of SampleList. Each SampleList contains data samples from the same data source. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
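Illustrative sketch (not part of the repository): losses from different data sources are kept apart by suffixing their keys with the source index, as in the function above.

def merge_source_losses(per_source_losses):
    """Suffix each loss key with the index of its data source."""
    merged = {}
    for idx, losses in enumerate(per_source_losses):
        for key, value in losses.items():
            merged[f'{key}_{idx}'] = value
    return merged


print(merge_source_losses([{'loss_cls': 0.8}, {'loss_cls': 1.2}]))
# {'loss_cls_0': 0.8, 'loss_cls_1': 1.2}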
def predict(self, data_samples: Sequence[SampleList]) -> SampleList: """Predict results from a batch of inputs and data samples with post- processing. Args: data_samples (Sequence[SampleList]): a sequence of SampleList. Each SampleList contains data samples from the same data source. Returns: List[``ActionDataSample``]: Return the recognition results. The returns value is ``ActionDataSample``, which usually contains ``pred_scores``. And the ``pred_scores`` usually contains following keys. - item (torch.Tensor): Classification scores, has a shape (num_classes, ) """ assert len(data_samples) == 1 feats = self.extract_feat(data_samples[0]['inputs'], test_mode=True) predictions = self.cls_head.predict(feats, data_samples[0]['data_samples']) return predictions
Predict results from a batch of inputs and data samples with post-processing.

    Args:
        data_samples (Sequence[SampleList]): A sequence of SampleList. Each
            SampleList contains data samples from the same data source.

    Returns:
        List[``ActionDataSample``]: The recognition results. Each returned
            ``ActionDataSample`` usually contains ``pred_scores``, and the
            ``pred_scores`` usually contains the following key.

            - item (torch.Tensor): Classification scores, has a shape
                (num_classes, )
predict
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def _forward(self, inputs: torch.Tensor, stage: str = 'backbone', **kwargs) -> ForwardResults: """Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. stage (str): Which stage to output the features. Returns: Union[tuple, torch.Tensor]: Features from ``backbone`` or ``head`` forward. """ feats, _ = self.extract_feat(inputs, stage=stage) return feats
Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. stage (str): Which stage to output the features. Returns: Union[tuple, torch.Tensor]: Features from ``backbone`` or ``head`` forward.
_forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def _run_forward(self, data: Union[dict, tuple, list], mode: str) -> Union[Dict[str, torch.Tensor], list]: """Unpacks data for :meth:`forward` Args: data (dict or tuple or list): Data sampled from dataset. mode (str): Mode of forward. Returns: dict or list: Results of training or testing mode. """ if isinstance(data, dict): data = [data] results = self(*data, mode=mode) elif isinstance(data, (list, tuple)): results = self(*data, mode=mode) else: raise TypeError return results
Unpacks data for :meth:`forward` Args: data (dict or tuple or list): Data sampled from dataset. mode (str): Mode of forward. Returns: dict or list: Results of training or testing mode.
_run_forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def extract_feat(self, inputs: torch.Tensor, stage: str = 'backbone', test_mode: bool = False) -> tuple: """Extract features of different stages. Args: inputs (torch.Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``'backbone'``. test_mode (bool): Whether in test mode. Defaults to False. Returns: torch.Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. These keys are usually included: ``loss_aux``. """ if len(inputs.shape) == 6: inputs = inputs.view((-1, ) + inputs.shape[2:]) # Check settings of test if test_mode: x = self.backbone(inputs) return x else: # Return features extracted through backbone x = self.backbone(inputs) if stage == 'backbone': return x x = self.cls_head(x) return x
Extract features of different stages.

    Args:
        inputs (torch.Tensor): The input data.
        stage (str): Which stage to output the feature.
            Defaults to ``'backbone'``.
        test_mode (bool): Whether in test mode. Defaults to False.

    Returns:
        torch.Tensor: The features extracted by the backbone, or the
            classification scores from the head when ``stage`` is not
            ``'backbone'`` and the model is not in test mode.
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def loss(self, x: Union[Tensor, Tuple[Tensor]], rpn_results_list: InstanceList, data_samples: SampleList, **kwargs) -> dict: """Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. rpn_results_list (List[:obj:`InstanceData`]): List of region proposals. data_samples (List[:obj:`ActionDataSample`]): The batch data samples. Returns: Dict[str, Tensor]: A dictionary of loss components. """ assert len(rpn_results_list) == len(data_samples) batch_gt_instances = [] for data_sample in data_samples: batch_gt_instances.append(data_sample.gt_instances) # assign gts and sample proposals num_imgs = len(data_samples) sampling_results = [] for i in range(num_imgs): # rename rpn_results.bboxes to rpn_results.priors rpn_results = rpn_results_list[i] rpn_results.priors = rpn_results.pop('bboxes') assign_result = self.bbox_assigner.assign(rpn_results, batch_gt_instances[i], None) sampling_result = self.bbox_sampler.sample(assign_result, rpn_results, batch_gt_instances[i]) sampling_results.append(sampling_result) # LFB needs meta_info: 'img_key' batch_img_metas = [ data_samples.metainfo for data_samples in data_samples ] losses = dict() # bbox head forward and loss bbox_results = self.bbox_loss(x, sampling_results, batch_img_metas) losses.update(bbox_results['loss_bbox']) return losses
Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. rpn_results_list (List[:obj:`InstanceData`]): List of region proposals. data_samples (List[:obj:`ActionDataSample`]): The batch data samples. Returns: Dict[str, Tensor]: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def _bbox_forward(self, x: Union[Tensor, Tuple[Tensor]], rois: Tensor, batch_img_metas: List[dict], **kwargs) -> dict: """Box head forward function used in both training and testing. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. batch_img_metas (List[dict]): List of image information. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. """ bbox_feats, global_feat = self.bbox_roi_extractor(x, rois) if self.with_shared_head: bbox_feats = self.shared_head( bbox_feats, feat=global_feat, rois=rois, img_metas=batch_img_metas) cls_score = self.bbox_head(bbox_feats) bbox_results = dict(cls_score=cls_score, bbox_feats=bbox_feats) return bbox_results
Box head forward function used in both training and testing. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. batch_img_metas (List[dict]): List of image information. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features.
_bbox_forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def bbox_loss(self, x: Union[Tensor, Tuple[Tensor]], sampling_results: List[SamplingResult], batch_img_metas: List[dict], **kwargs) -> dict: """Perform forward propagation and loss calculation of the bbox head on the features of the upstream network. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. sampling_results (List[SamplingResult]): Sampling results. batch_img_metas (List[dict]): List of image information. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. - `loss_bbox` (dict): A dictionary of bbox loss components. """ rois = bbox2roi([res.priors for res in sampling_results]) bbox_results = self._bbox_forward(x, rois, batch_img_metas) bbox_loss_and_target = self.bbox_head.loss_and_target( cls_score=bbox_results['cls_score'], rois=rois, sampling_results=sampling_results, rcnn_train_cfg=self.train_cfg) bbox_results.update(loss_bbox=bbox_loss_and_target['loss_bbox']) return bbox_results
Perform forward propagation and loss calculation of the bbox head on the features of the upstream network. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. sampling_results (List[SamplingResult]): Sampling results. batch_img_metas (List[dict]): List of image information. Returns: dict[str, Tensor]: Usually returns a dictionary with keys: - `cls_score` (Tensor): Classification scores. - `bbox_pred` (Tensor): Box energies / deltas. - `bbox_feats` (Tensor): Extract bbox RoI features. - `loss_bbox` (dict): A dictionary of bbox loss components.
bbox_loss
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def predict(self, x: Union[Tensor, Tuple[Tensor]], rpn_results_list: InstanceList, data_samples: SampleList, **kwargs) -> InstanceList: """Perform forward propagation of the roi head and predict detection results on the features of the upstream network. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. rpn_results_list (List[:obj:`InstanceData`]): list of region proposals. data_samples (List[:obj:`ActionDataSample`]): The batch data samples. Returns: List[obj:`InstanceData`]: Detection results of each image. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). """ assert self.with_bbox, 'Bbox head must be implemented.' batch_img_metas = [ data_samples.metainfo for data_samples in data_samples ] if isinstance(x, tuple): x_shape = x[0].shape else: x_shape = x.shape assert x_shape[0] == 1, 'only accept 1 sample at test mode' assert x_shape[0] == len(batch_img_metas) == len(rpn_results_list) results_list = self.predict_bbox( x, batch_img_metas, rpn_results_list, rcnn_test_cfg=self.test_cfg) return results_list
Perform forward propagation of the roi head and predict detection results on the features of the upstream network. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. rpn_results_list (List[:obj:`InstanceData`]): list of region proposals. data_samples (List[:obj:`ActionDataSample`]): The batch data samples. Returns: List[obj:`InstanceData`]: Detection results of each image. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
predict
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: InstanceList, rcnn_test_cfg: ConfigType) -> InstanceList: """Perform forward propagation of the bbox head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys: - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). """ proposals = [res.bboxes for res in rpn_results_list] rois = bbox2roi(proposals) bbox_results = self._bbox_forward(x, rois, batch_img_metas) # split batch bbox prediction back to each image cls_scores = bbox_results['cls_score'] num_proposals_per_img = tuple(len(p) for p in proposals) rois = rois.split(num_proposals_per_img, 0) cls_scores = cls_scores.split(num_proposals_per_img, 0) result_list = self.bbox_head.predict_by_feat( rois=rois, cls_scores=cls_scores, batch_img_metas=batch_img_metas, rcnn_test_cfg=rcnn_test_cfg) return result_list
Perform forward propagation of the bbox head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[:obj:`InstanceData`]): List of region proposals. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN. Returns: list[:obj:`InstanceData`]: Detection results of each image after the post process. Each item usually contains following keys: - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ).
predict_bbox
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
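Illustrative sketch (not part of the repository): the per-image split at the end of `predict_bbox` uses `Tensor.split` with the proposal counts. A toy version with random boxes:

import torch

proposals = [torch.rand(3, 4), torch.rand(5, 4)]  # boxes for two images
num_proposals_per_img = tuple(len(p) for p in proposals)

cls_scores = torch.randn(sum(num_proposals_per_img), 7)  # stacked predictions
per_image_scores = cls_scores.split(num_proposals_per_img, 0)
print([s.shape for s in per_image_scores])  # [torch.Size([3, 7]), torch.Size([5, 7])]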
def forward(self, x: Tensor) -> Tensor: """Computes the classification logits given ROI features.""" if self.dropout_before_pool and self.dropout_ratio > 0: x = self.dropout(x) x = self.temporal_pool(x) x = self.spatial_pool(x) if not self.dropout_before_pool and self.dropout_ratio > 0: x = self.dropout(x) x = x.view(x.size(0), -1) cls_score = self.fc_cls(x) return cls_score
Computes the classification logits given ROI features.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
def get_recall_prec(pred_vec: Tensor, target_vec: Tensor) -> tuple: """Computes the Recall/Precision for both multi-label and single label scenarios. Note that the computation calculates the micro average. Note, that in both cases, the concept of correct/incorrect is the same. Args: pred_vec (tensor[N x C]): each element is either 0 or 1 target_vec (tensor[N x C]): each element is either 0 or 1 - for single label it is expected that only one element is on (1) although this is not enforced. """ correct = pred_vec & target_vec recall = correct.sum(1) / target_vec.sum(1).float() # Enforce Float prec = correct.sum(1) / (pred_vec.sum(1) + 1e-6) return recall.mean(), prec.mean()
Computes the recall and precision for both multi-label and single-label scenarios, using the micro average. In both cases the notion of a correct prediction is the same.

    Args:
        pred_vec (tensor[N x C]): Each element is either 0 or 1.
        target_vec (tensor[N x C]): Each element is either 0 or 1. For the
            single-label case it is expected that exactly one element per
            row is on (1), although this is not enforced.
get_recall_prec
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
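Illustrative sketch (not part of the repository): a worked toy example of the micro-averaged recall/precision computed by `get_recall_prec`, with two samples and four classes.

import torch

pred_vec = torch.tensor([[1, 0, 1, 0],
                         [0, 1, 0, 0]]).bool()
target_vec = torch.tensor([[1, 1, 0, 0],
                           [0, 1, 0, 0]]).bool()

correct = pred_vec & target_vec
recall = correct.sum(1) / target_vec.sum(1).float()  # per-sample recall: [0.5, 1.0]
prec = correct.sum(1) / (pred_vec.sum(1) + 1e-6)     # per-sample precision: [0.5, 1.0]
print(recall.mean().item(), prec.mean().item())      # 0.75 and ~0.75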
def topk_accuracy(self, pred: Tensor, target: Tensor, thr: float = 0.5) -> tuple: """Computes the Top-K Accuracies for both single and multi-label scenarios.""" # Define Target vector: target_bool = target > 0.5 # Branch on Multilabel for computing output classification if self.multilabel: pred = pred.sigmoid() else: pred = pred.softmax(dim=1) # Compute at threshold (K=1 for single) if self.multilabel: pred_bool = pred > thr else: pred_bool = self.topk_to_matrix(pred, 1) recall_thr, prec_thr = self.get_recall_prec(pred_bool, target_bool) # Compute at various K recalls_k, precs_k = [], [] for k in self.topk: pred_bool = self.topk_to_matrix(pred, k) recall, prec = self.get_recall_prec(pred_bool, target_bool) recalls_k.append(recall) precs_k.append(prec) # Return all return recall_thr, prec_thr, recalls_k, precs_k
Computes the Top-K Accuracies for both single and multi-label scenarios.
topk_accuracy
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
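Illustrative sketch (not part of the repository): `topk_to_matrix` is not included in this excerpt; an assumed implementation that marks the k highest-scoring classes per sample might look like this.

import torch


def topk_to_matrix(pred, k):
    """Assumed helper: mark the k highest-scoring classes per sample as True."""
    topk_idx = pred.topk(k, dim=1)[1]
    matrix = torch.zeros_like(pred, dtype=torch.bool)
    matrix[torch.arange(pred.size(0)).unsqueeze(1), topk_idx] = True
    return matrix


probs = torch.softmax(torch.randn(2, 5), dim=1)
print(topk_to_matrix(probs, k=2))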
def loss_and_target(self, cls_score: Tensor, rois: Tensor, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigDict, **kwargs) -> dict: """Calculate the loss based on the features extracted by the bbox head. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, num_classes) rois (Tensor): RoIs with the shape (batch_size * num_proposals_single_image, 5) where the first column indicates batch id of each RoI. sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. Returns: dict: A dictionary of loss components. """ cls_targets = self.get_targets(sampling_results, rcnn_train_cfg) labels, _ = cls_targets losses = dict() # Only use the cls_score if cls_score is not None: if self.background_class: labels = labels[:, 1:] # Get valid labels (ignore first one) cls_score = cls_score[:, 1:] pos_inds = torch.sum(labels, dim=-1) > 0 cls_score = cls_score[pos_inds] labels = labels[pos_inds] # Compute First Recall/Precisions # This has to be done first before normalising the label-space. recall_thr, prec_thr, recall_k, prec_k = self.topk_accuracy( cls_score, labels, thr=0.5) losses['recall@thr=0.5'] = recall_thr losses['prec@thr=0.5'] = prec_thr for i, k in enumerate(self.topk): losses[f'recall@top{k}'] = recall_k[i] losses[f'prec@top{k}'] = prec_k[i] # If Single-label, need to ensure that target labels sum to 1: ie # that they are valid probabilities. if not self.multilabel and self.background_class: labels = labels / labels.sum(dim=1, keepdim=True) # Select Loss function based on single/multi-label # NB. Both losses auto-compute sigmoid/softmax on prediction if self.multilabel: loss_func = F.binary_cross_entropy_with_logits else: loss_func = cross_entropy_loss # Compute loss loss = loss_func(cls_score, labels, reduction='none') pt = torch.exp(-loss) F_loss = self.focal_alpha * (1 - pt)**self.focal_gamma * loss losses['loss_action_cls'] = torch.mean(F_loss) return dict(loss_bbox=losses, bbox_targets=cls_targets)
Calculate the loss based on the features extracted by the bbox head. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, num_classes) rois (Tensor): RoIs with the shape (batch_size * num_proposals_single_image, 5) where the first column indicates batch id of each RoI. sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. Returns: dict: A dictionary of loss components.
loss_and_target
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
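Illustrative sketch (not part of the repository): the focal re-weighting at the end of `loss_and_target`, shown on a plain binary cross-entropy loss; the `alpha` and `gamma` values are illustrative.

import torch
import torch.nn.functional as F


def focal_bce_with_logits(logits, targets, alpha=1.0, gamma=2.0):
    loss = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    pt = torch.exp(-loss)  # probability assigned to the correct outcome
    return (alpha * (1 - pt) ** gamma * loss).mean()


logits = torch.randn(4, 10)
targets = torch.randint(0, 2, (4, 10)).float()
print(focal_bce_with_logits(logits, targets))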
def forward(self, feat: Union[Tensor, Tuple[Tensor]], rois: Tensor) -> tuple: """Forward function for extract roi features. Args: feat (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. The shape of feat is N, C, T, H, W. rois (Tensor): Input RoIs, shape (k, 5). Returns: tuple: A tuple of roi features and global features. - roi_feats (Tensor): Extracted bbox RoI features. - feat (Tensor): Global features of the video clip. """ if not isinstance(feat, tuple): feat = (feat, ) if len(feat) >= 2: maxT = max([x.shape[2] for x in feat]) max_shape = (maxT, ) + feat[0].shape[3:] # resize each feat to the largest shape (w. nearest) feat = [F.interpolate(x, max_shape).contiguous() for x in feat] if self.with_temporal_pool: if self.temporal_pool_mode == 'avg': feat = [torch.mean(x, 2, keepdim=True) for x in feat] elif self.temporal_pool_mode == 'max': feat = [torch.max(x, 2, keepdim=True)[0] for x in feat] else: raise NotImplementedError feat = torch.cat(feat, axis=1).contiguous() roi_feats = [] for t in range(feat.size(2)): frame_feat = feat[:, :, t].contiguous() roi_feat = self.roi_layer(frame_feat, rois) if self.with_global: global_feat = self.global_pool(frame_feat.contiguous()) inds = rois[:, 0].type(torch.int64) global_feat = global_feat[inds] roi_feat = torch.cat([roi_feat, global_feat], dim=1) roi_feat = roi_feat.contiguous() roi_feats.append(roi_feat) roi_feats = torch.stack(roi_feats, dim=2) return roi_feats, feat
Forward function for extract roi features. Args: feat (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. The shape of feat is N, C, T, H, W. rois (Tensor): Input RoIs, shape (k, 5). Returns: tuple: A tuple of roi features and global features. - roi_feats (Tensor): Extracted bbox RoI features. - feat (Tensor): Global features of the video clip.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_extractors/single_straight3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_extractors/single_straight3d.py
Apache-2.0
def forward(self, x, feat, rois, **kwargs): """Defines the computation performed at every call. Args: x (torch.Tensor): The extracted RoI feature. feat (torch.Tensor): The context feature. rois (torch.Tensor): The regions of interest. Returns: torch.Tensor: The RoI features that have interacted with context feature. """ # We use max pooling by default x = self.max_pool(x) h, w = feat.shape[-2:] x_tile = x.repeat(1, 1, 1, h, w) roi_inds = rois[:, 0].type(torch.long) roi_gfeat = feat[roi_inds] new_feat = torch.cat([x_tile, roi_gfeat], dim=1) new_feat = self.conv1(new_feat) new_feat = self.conv2(new_feat) for conv in self.convs: new_feat = conv(new_feat) return new_feat
Defines the computation performed at every call. Args: x (torch.Tensor): The extracted RoI feature. feat (torch.Tensor): The context feature. rois (torch.Tensor): The regions of interest. Returns: torch.Tensor: The RoI features that have interacted with context feature.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/acrn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/acrn_head.py
Apache-2.0
def sample_lfb(self, rois, img_metas): """Sample long-term features for each ROI feature.""" inds = rois[:, 0].type(torch.int64) lt_feat_list = [] for ind in inds: lt_feat_list.append(self.lfb[img_metas[ind]['img_key']]) lt_feat = torch.stack(lt_feat_list, dim=0) # [N, lfb_channels, window_size * max_num_feat_per_step] lt_feat = lt_feat.permute(0, 2, 1).contiguous() return lt_feat.unsqueeze(-1).unsqueeze(-1)
Sample long-term features for each ROI feature.
sample_lfb
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def __getitem__(self, img_key):
        """Sample long term features like `lfb['0f39OWEqJ24,0902']` where
        `lfb` is an instance of class LFB."""
        video_id, timestamp = img_key.split(',')
        return self.sample_long_term_features(video_id, int(timestamp))
Sample long term features like `lfb['0f39OWEqJ24,0902']` where `lfb` is an instance of class LFB.
__getitem__
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/lfb.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb.py
Apache-2.0
def forward(self, x, rois, img_metas, **kwargs): """Defines the computation performed at every call. Args: x (torch.Tensor): The extracted RoI feature. rois (torch.Tensor): The regions of interest. img_metas (List[dict]): The meta information of the data. Returns: torch.Tensor: The RoI features that have interacted with context """ # [N, C, 1, 1, 1] features = self.temporal_pool(x) features = self.spatial_pool(features) if self.use_half_precision: features = features.half() inds = rois[:, 0].type(torch.int64) for ind in inds: self.all_metadata.append(img_metas[ind]['img_key']) self.all_features += list(features) # Return the input directly and doesn't affect the input. return x
Defines the computation performed at every call.

    Args:
        x (torch.Tensor): The extracted RoI feature.
        rois (torch.Tensor): The regions of interest.
        img_metas (List[dict]): The meta information of the data.

    Returns:
        torch.Tensor: The input RoI feature, returned unchanged; this head
            only stores the pooled features and image keys for building the
            long-term feature bank.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/lfb_infer_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb_infer_head.py
Apache-2.0
def _freeze_stages(self) -> None: """Prevent all the parameters from being optimized before ``self.frozen_layers``.""" if self.frozen_layers >= 0: top_layers = [ 'ln_final', 'text_projection', 'logit_scale', 'visual.ln_post', 'visual.proj' ] mid_layers = [ 'visual.transformer.resblocks', 'transformer.resblocks' ] for name, param in self.clip.named_parameters(): if any(name.find(n) == 0 for n in top_layers): continue elif any(name.find(n) == 0 for n in mid_layers): layer_n = int(name.split('.resblocks.')[1].split('.')[0]) if layer_n >= self.frozen_layers: continue param.requires_grad = False
Freeze the parameters of all layers below ``self.frozen_layers`` so they are not optimized; the top projection layers and the transformer blocks at or above the threshold remain trainable.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/similarity/clip_similarity.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py
Apache-2.0
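Illustrative sketch (not part of the repository): a simplified version of the name-based freezing above, applied to a toy module whose blocks are named `blocks.<i>`; the real method additionally keeps a set of top projection layers trainable.

import torch.nn as nn


def freeze_below(model, frozen_layers, block_prefix='blocks.'):
    """Freeze every parameter belonging to a block with index < frozen_layers."""
    for name, param in model.named_parameters():
        if block_prefix in name:
            layer_n = int(name.split(block_prefix)[1].split('.')[0])
            if layer_n < frozen_layers:
                param.requires_grad = False


wrapper = nn.Module()
wrapper.blocks = nn.Sequential(*[nn.Linear(8, 8) for _ in range(4)])
freeze_below(wrapper, frozen_layers=2)
print([n for n, p in wrapper.named_parameters() if not p.requires_grad])
# ['blocks.0.weight', 'blocks.0.bias', 'blocks.1.weight', 'blocks.1.bias']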