Start prototype

lib/layers/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
from .drop import DropBlock2d, DropPath
from .weight_init import trunc_normal_
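Note: these two re-exports define the package's public surface. A minimal smoke test, assuming the repository root is on PYTHONPATH (the tensor sizes are illustrative):

import torch
from lib.layers import DropBlock2d, DropPath, trunc_normal_

x = torch.randn(2, 8, 16, 16)
print(DropBlock2d(drop_prob=0.1)(x).shape)  # torch.Size([2, 8, 16, 16]); active, since fresh modules default to train mode
print(DropPath(drop_prob=0.1)(x).shape)     # torch.Size([2, 8, 16, 16])
w = trunc_normal_(torch.empty(8, 8), std=0.02)  # in-place init; returns the tensor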
							
								
								
									
lib/layers/drop.py (new file, 169 lines)
@@ -0,0 +1,169 @@
""" Borrowed from https://github.com/rwightman/pytorch-image-models
DropBlock, DropPath

PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.

Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)

Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)

Code:
DropBlock impl inspired by two Tensorflow impls that I liked:
 - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
 - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py

Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


def drop_block_2d(
    x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
    with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
  """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

  DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
  runs with success, but needs further validation and possibly optimization for lower runtime impact.
  """
  B, C, H, W = x.shape
  total_size = W * H
  clipped_block_size = min(block_size, min(W, H))
  # seed_drop_rate, the gamma parameter
  gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
    (W - block_size + 1) * (H - block_size + 1))
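  # e.g. for a 56x56 feature map with drop_prob=0.1, block_size=7, gamma_scale=1.0:
  # gamma = 0.1 * 3136 / 49 / (50 * 50) ~= 0.0026, i.e. ~0.26% of positions seed a block,
  # so that roughly drop_prob of the activations end up inside dropped blocks.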

  # Forces the block to be inside the feature map; the meshgrid is ordered (H, W) so the
  # (1, 1, H, W) reshape below lines up even for non-square inputs.
  h_i, w_i = torch.meshgrid(
    torch.arange(H).to(x.device), torch.arange(W).to(x.device), indexing='ij')
  valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
                ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
  valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)

  if batchwise:
    # one mask for whole batch, quite a bit faster
    uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
  else:
    uniform_noise = torch.rand_like(x)
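  # The expression below is 0 exactly where uniform_noise < gamma and valid_block == 1
  # (a block seed), and 1 everywhere else; the -max_pool2d(-mask) that follows acts as a
  # min-pool, growing each zero seed into a clipped_block_size x clipped_block_size square.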
  block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
  block_mask = -F.max_pool2d(
    -block_mask,
    kernel_size=clipped_block_size,  # clipped so the kernel never exceeds the feature map
    stride=1,
    padding=clipped_block_size // 2)

  if with_noise:
    normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
    if inplace:
      x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
    else:
      x = x * block_mask + normal_noise * (1 - block_mask)
  else:
    normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
    if inplace:
      x.mul_(block_mask * normalize_scale)
    else:
      x = x * block_mask * normalize_scale
  return x


def drop_block_fast_2d(
    x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
    gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
  """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

  DropBlock with an experimental gaussian noise option. Simplified from the version above,
  without concern for the valid block mask at the edges.
  """
  B, C, H, W = x.shape
  total_size = W * H
  clipped_block_size = min(block_size, min(W, H))
  gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
      (W - block_size + 1) * (H - block_size + 1))

  if batchwise:
    # one mask for whole batch, quite a bit faster
    block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
  else:
    # mask per batch element
    block_mask = torch.rand_like(x) < gamma
  block_mask = F.max_pool2d(
    block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
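  # Note the flipped polarity vs. drop_block_2d: here block_mask == 1 marks positions to
  # drop, so the max-pool dilates the seeds directly into blocks.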

  if with_noise:
    normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
    if inplace:
      x.mul_(1. - block_mask).add_(normal_noise * block_mask)
    else:
      x = x * (1. - block_mask) + normal_noise * block_mask
  else:
    block_mask = 1 - block_mask
    normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype)
    if inplace:
      x.mul_(block_mask * normalize_scale)
    else:
      x = x * block_mask * normalize_scale
  return x


class DropBlock2d(nn.Module):
  """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
  """
  def __init__(self,
         drop_prob=0.1,
         block_size=7,
         gamma_scale=1.0,
         with_noise=False,
         inplace=False,
         batchwise=False,
         fast=True):
    super(DropBlock2d, self).__init__()
    self.drop_prob = drop_prob
    self.gamma_scale = gamma_scale
    self.block_size = block_size
    self.with_noise = with_noise
    self.inplace = inplace
    self.batchwise = batchwise
    self.fast = fast  # FIXME finish comparisons of fast vs not

  def forward(self, x):
    if not self.training or not self.drop_prob:
      return x
    if self.fast:
      return drop_block_fast_2d(
        x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
    else:
      return drop_block_2d(
        x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)


def drop_path(x, drop_prob: float = 0., training: bool = False):
  """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).

  This is the same as the DropConnect impl I created for EfficientNet, etc. networks; however,
  the original name is misleading, as 'Drop Connect' is a different form of dropout in a separate
  paper (see discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956).
  I've opted for the layer and argument names 'drop path' rather than mixing DropConnect as a
  layer name with 'survival rate' as the argument.
  """
  if drop_prob == 0. or not training:
    return x
  keep_prob = 1 - drop_prob
  shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with tensors of any dim, not just 2D ConvNets
  random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
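  # keep_prob + U[0, 1) is >= 1 with probability keep_prob, so the floor_() below yields a
  # per-sample Bernoulli(keep_prob) mask broadcast over all non-batch dims; dividing x by
  # keep_prob keeps the expected activation unchanged.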
  random_tensor.floor_()  # binarize
  output = x.div(keep_prob) * random_tensor
  return output


class DropPath(nn.Module):
  """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
  """
  def __init__(self, drop_prob=0.):  # default 0. so a bare DropPath() is a no-op rather than a TypeError in training
    super(DropPath, self).__init__()
    self.drop_prob = drop_prob

  def forward(self, x):
    return drop_path(x, self.drop_prob, self.training)
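These layers are meant to slot into residual blocks. A hedged sketch of typical wiring; the ToyBlock module and its sizes are illustrative, not part of this commit:

import torch
import torch.nn as nn
from lib.layers import DropBlock2d, DropPath


class ToyBlock(nn.Module):
  """Illustrative residual block: DropBlock on the conv branch, DropPath on the residual add."""
  def __init__(self, channels, drop_prob=0.1, drop_path_rate=0.1):
    super().__init__()
    self.conv = nn.Conv2d(channels, channels, 3, padding=1)
    self.drop_block = DropBlock2d(drop_prob=drop_prob, block_size=5)
    self.drop_path = DropPath(drop_prob=drop_path_rate)

  def forward(self, x):
    return x + self.drop_path(self.drop_block(self.conv(x)))


block = ToyBlock(8).train()
y = block(torch.randn(2, 8, 16, 16))  # both regularizers are active only in train mode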
							
								
								
									
lib/layers/weight_init.py (new file, 61 lines)
@@ -0,0 +1,61 @@
# Borrowed from https://github.com/rwightman/pytorch-image-models
import torch
import math
import warnings


def _no_grad_trunc_normal_(tensor, mean, std, a, b):
  # Cut & paste from PyTorch official master until it's in a few official releases - RW
  # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
  def norm_cdf(x):
    # Computes standard normal cumulative distribution function
    return (1. + math.erf(x / math.sqrt(2.))) / 2.

  if (mean < a - 2 * std) or (mean > b + 2 * std):
    warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                  "The distribution of values may be incorrect.",
                  stacklevel=2)

  with torch.no_grad():
    # Values are generated by using a truncated uniform distribution and
    # then using the inverse CDF for the normal distribution.
    # Get upper and lower cdf values
    l = norm_cdf((a - mean) / std)
    u = norm_cdf((b - mean) / std)

    # Uniformly fill tensor with values from [l, u], then translate to
    # [2l-1, 2u-1].
    tensor.uniform_(2 * l - 1, 2 * u - 1)

    # Use inverse cdf transform for normal distribution to get truncated
    # standard normal
    tensor.erfinv_()

    # Transform to proper mean, std
    tensor.mul_(std * math.sqrt(2.))
    tensor.add_(mean)

    # Clamp to ensure it's in the proper range
    tensor.clamp_(min=a, max=b)
    return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
  # type: (Tensor, float, float, float, float) -> Tensor
  r"""Fills the input Tensor with values drawn from a truncated
  normal distribution. The values are effectively drawn from the
  normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
  with values outside :math:`[a, b]` redrawn until they are within
  the bounds. The method used for generating the random values works
  best when :math:`a \leq \text{mean} \leq b`.

  Args:
    tensor: an n-dimensional `torch.Tensor`
    mean: the mean of the normal distribution
    std: the standard deviation of the normal distribution
    a: the minimum cutoff value
    b: the maximum cutoff value

  Examples:
    >>> w = torch.empty(3, 5)
    >>> trunc_normal_(w)
  """
  return _no_grad_trunc_normal_(tensor, mean, std, a, b)
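In practice the initializer is applied as a model's weights are created, e.g. via Module.apply. A short hypothetical sketch (the init_linear helper is not part of this commit):

import torch
import torch.nn as nn
from lib.layers import trunc_normal_


def init_linear(m):
  # Hypothetical ViT-style init: truncated normal for Linear weights, zeros for biases.
  if isinstance(m, nn.Linear):
    trunc_normal_(m.weight, std=0.02)
    if m.bias is not None:
      nn.init.zeros_(m.bias)


model = nn.Sequential(nn.Linear(16, 32), nn.GELU(), nn.Linear(32, 4))
model.apply(init_linear)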