# xautodl/tests/test_super_att.py
#####################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.03 #
#####################################################
# pytest ./tests/test_super_att.py -s #
#####################################################
import sys, random
import unittest

from parameterized import parameterized
import pytest
from pathlib import Path

lib_dir = (Path(__file__).parent / ".." / "lib").resolve()
print("library path: {:}".format(lib_dir))
if str(lib_dir) not in sys.path:
    sys.path.insert(0, str(lib_dir))

import torch
from xlayers import super_core
import spaces
class TestSuperAttention(unittest.TestCase):
    """Test the super attention layer and the super transformer encoder layer."""

    def _internal_func(self, inputs, model):
        outputs = model(inputs)
        abstract_space = model.abstract_search_space
        print(
            "The abstract search space for SuperAttention is:\n{:}".format(
                abstract_space
            )
        )
        # sample a random candidate architecture from the abstract search space
        abstract_space.clean_last()
        abstract_child = abstract_space.random(reuse_last=True)
        print("The abstract child program is:\n{:}".format(abstract_child))
        # switch the super model into candidate mode and apply the sampled config
        model.set_super_run_type(super_core.SuperRunMode.Candidate)
        model.apply_candidate(abstract_child)
        outputs = model(inputs)
        return abstract_child, outputs

    def test_super_attention(self):
        proj_dim = spaces.Categorical(12, 24, 36)
        num_heads = spaces.Categorical(2, 4, 6)
        model = super_core.SuperAttention(10, proj_dim, num_heads)
        print(model)
        model.apply_verbose(True)

        inputs = torch.rand(4, 20, 10)  # batch size, sequence length, channel
        abstract_child, outputs = self._internal_func(inputs, model)
        # the output channel must match the sampled projection dimension
        output_shape = (4, 20, abstract_child["proj"]["_out_features"].value)
        self.assertEqual(tuple(outputs.shape), output_shape)

    @parameterized.expand([[6], [12], [24], [48]])
    def test_transformer_encoder(self, input_dim):
        output_dim = spaces.Categorical(12, 24, 36)
        model = super_core.SuperTransformerEncoderLayer(
            input_dim,
            output_dim=output_dim,
            num_heads=spaces.Categorical(2, 4, 6),
            mlp_hidden_multiplier=spaces.Categorical(1, 2, 4),
        )
        print(model)
        model.apply_verbose(True)

        inputs = torch.rand(4, 20, input_dim)
        abstract_child, outputs = self._internal_func(inputs, model)
        # re-query the last sampled output dimension to build the expected shape
        output_shape = (
            4,
            20,
            output_dim.abstract(reuse_last=True).random(reuse_last=True).value,
        )
        self.assertEqual(tuple(outputs.shape), output_shape)
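

# Not part of the original file: a minimal convenience entry point so the suite
# can also be launched directly via `python tests/test_super_att.py`; the
# documented invocation above remains `pytest ./tests/test_super_att.py -s`.
if __name__ == "__main__":
    unittest.main()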