Update Q models
This commit is contained in:
		| @@ -1,2 +1,4 @@ | ||||
| from .drop import DropBlock2d, DropPath | ||||
| from .weight_init import trunc_normal_ | ||||
|  | ||||
| from .positional_embedding import PositionalEncoder | ||||
|   | ||||
							
								
								
									
										29
									
								
								lib/layers/positional_embedding.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								lib/layers/positional_embedding.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| import math | ||||
|  | ||||
class PositionalEncoder(nn.Module):
  """Fixed (non-learned) sinusoidal positional encoding.

  Implements the encoding from "Attention Is All You Need"
  (https://arxiv.org/pdf/1706.03762.pdf):

      PE[pos, 2i]   = sin(pos / 10000^(2i / d_model))
      PE[pos, 2i+1] = cos(pos / 10000^(2i / d_model))

  The table is precomputed once in the constructor and stored as a
  (non-trainable) buffer of shape (1, max_seq_len, d_model), so it moves
  with the module across devices and is saved in state_dict.
  """

  def __init__(self, d_model, max_seq_len):
    """
    Args:
      d_model: embedding dimension of the model (number of features).
      max_seq_len: longest sequence length the table must cover.
    """
    super(PositionalEncoder, self).__init__()
    self.d_model = d_model
    # Vectorized construction of the encoding table (replaces a Python
    # double loop): positions down the rows, feature index across columns.
    position = torch.arange(max_seq_len, dtype=torch.float32).unsqueeze(1)  # (L, 1)
    i = torch.arange(d_model, dtype=torch.float32)
    # Divisor 10000^(2*(i//2)/d_model); paired even/odd columns share it.
    div = torch.pow(10000.0, torch.div(i, 2, rounding_mode='floor') * 2.0 / d_model)  # (D,)
    angles = position / div  # (L, D), broadcast
    pe = torch.zeros(max_seq_len, d_model)
    pe[:, 0::2] = torch.sin(angles[:, 0::2])  # even feature indices -> sin
    pe[:, 1::2] = torch.cos(angles[:, 1::2])  # odd feature indices -> cos
    pe = pe.unsqueeze(0)  # (1, L, D) for broadcasting over the batch dim
    self.register_buffer('pe', pe)

  def forward(self, x):
    """Add positional encodings to `x`.

    Args:
      x: tensor with at least 3 dims, assumed (batch, seq, d_model, ...) —
         only the first `seq` rows and first `fdim` columns of the table
         are used, so seq <= max_seq_len and fdim <= d_model must hold.

    Returns:
      `x` plus the matching slice of the precomputed encoding table.
    """
    batch, seq, fdim = x.shape[:3]
    embeddings = self.pe[:, :seq, :fdim]
    return x + embeddings
		Reference in New Issue
	
	Block a user