Fix bugs in ViT
notebooks/spaces/test-transformer-encoder.ipynb (new file, +277 lines)
@@ -0,0 +1,277 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3f754c96",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from xautodl import spaces\n",
    "from xautodl.xlayers import super_core\n",
    "\n",
|     "def _create_stel(input_dim, output_dim, order):\n", | ||||
|     "    return super_core.SuperSequential(\n", | ||||
|     "        super_core.SuperLinear(input_dim, output_dim),\n", | ||||
|     "        super_core.SuperTransformerEncoderLayer(\n", | ||||
|     "            output_dim,\n", | ||||
|     "            num_heads=spaces.Categorical(2, 4, 6),\n", | ||||
|     "            mlp_hidden_multiplier=spaces.Categorical(1, 2, 4),\n", | ||||
|     "            order=order,\n", | ||||
|     "        ),\n", | ||||
|     "    )" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 2, | ||||
|    "id": "81d42f4b", | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "batch, seq_dim, input_dim = 1, 4, 6\n", | ||||
|     "order = super_core.LayerOrder.PreNorm" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 3, | ||||
|    "id": "8056b37c", | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "SuperSequential(\n", | ||||
|       "  (0): SuperSequential(\n", | ||||
|       "    (0): SuperLinear(in_features=6, out_features=Categorical(candidates=[12, 24, 36], default_index=None), bias=True)\n", | ||||
|       "    (1): SuperTransformerEncoderLayer(\n", | ||||
|       "      (norm1): SuperLayerNorm1D(shape=Categorical(candidates=[12, 24, 36], default_index=None), eps=1e-06, elementwise_affine=True)\n", | ||||
|       "      (mha): SuperSelfAttention(\n", | ||||
|       "        input_dim=Categorical(candidates=[12, 24, 36], default_index=None), proj_dim=Categorical(candidates=[12, 24, 36], default_index=None), num_heads=Categorical(candidates=[2, 4, 6], default_index=None), mask=False, infinity=1000000000.0\n", | ||||
|       "        (q_fc): SuperLinear(in_features=Categorical(candidates=[12, 24, 36], default_index=None), out_features=Categorical(candidates=[12, 24, 36], default_index=None), bias=False)\n", | ||||
|       "        (k_fc): SuperLinear(in_features=Categorical(candidates=[12, 24, 36], default_index=None), out_features=Categorical(candidates=[12, 24, 36], default_index=None), bias=False)\n", | ||||
|       "        (v_fc): SuperLinear(in_features=Categorical(candidates=[12, 24, 36], default_index=None), out_features=Categorical(candidates=[12, 24, 36], default_index=None), bias=False)\n", | ||||
|       "        (attn_drop): SuperDrop(p=0.0, dims=[-1, -1, -1, -1], recover=True)\n", | ||||
|       "      )\n", | ||||
|       "      (drop): Dropout(p=0.0, inplace=False)\n", | ||||
|       "      (norm2): SuperLayerNorm1D(shape=Categorical(candidates=[12, 24, 36], default_index=None), eps=1e-06, elementwise_affine=True)\n", | ||||
|       "      (mlp): SuperMLPv2(\n", | ||||
|       "        in_features=Categorical(candidates=[12, 24, 36], default_index=None), hidden_multiplier=Categorical(candidates=[1, 2, 4], default_index=None), out_features=Categorical(candidates=[12, 24, 36], default_index=None), drop=None, fc1 -> act -> drop -> fc2 -> drop,\n", | ||||
|       "        (_params): ParameterDict(\n", | ||||
|       "            (fc1_super_weight): Parameter containing: [torch.FloatTensor of size 144x36]\n", | ||||
|       "            (fc1_super_bias): Parameter containing: [torch.FloatTensor of size 144]\n", | ||||
|       "            (fc2_super_weight): Parameter containing: [torch.FloatTensor of size 36x144]\n", | ||||
|       "            (fc2_super_bias): Parameter containing: [torch.FloatTensor of size 36]\n", | ||||
|       "        )\n", | ||||
|       "        (act): GELU()\n", | ||||
|       "        (drop): Dropout(p=0.0, inplace=False)\n", | ||||
|       "      )\n", | ||||
|       "    )\n", | ||||
|       "  )\n", | ||||
|       "  (1): SuperSequential(\n", | ||||
|       "    (0): SuperLinear(in_features=Categorical(candidates=[12, 24, 36], default_index=None), out_features=Categorical(candidates=[24, 36, 48], default_index=None), bias=True)\n", | ||||
|       "    (1): SuperTransformerEncoderLayer(\n", | ||||
|       "      (norm1): SuperLayerNorm1D(shape=Categorical(candidates=[24, 36, 48], default_index=None), eps=1e-06, elementwise_affine=True)\n", | ||||
|       "      (mha): SuperSelfAttention(\n", | ||||
|       "        input_dim=Categorical(candidates=[24, 36, 48], default_index=None), proj_dim=Categorical(candidates=[24, 36, 48], default_index=None), num_heads=Categorical(candidates=[2, 4, 6], default_index=None), mask=False, infinity=1000000000.0\n", | ||||
|       "        (q_fc): SuperLinear(in_features=Categorical(candidates=[24, 36, 48], default_index=None), out_features=Categorical(candidates=[24, 36, 48], default_index=None), bias=False)\n", | ||||
|       "        (k_fc): SuperLinear(in_features=Categorical(candidates=[24, 36, 48], default_index=None), out_features=Categorical(candidates=[24, 36, 48], default_index=None), bias=False)\n", | ||||
|       "        (v_fc): SuperLinear(in_features=Categorical(candidates=[24, 36, 48], default_index=None), out_features=Categorical(candidates=[24, 36, 48], default_index=None), bias=False)\n", | ||||
|       "        (attn_drop): SuperDrop(p=0.0, dims=[-1, -1, -1, -1], recover=True)\n", | ||||
|       "      )\n", | ||||
|       "      (drop): Dropout(p=0.0, inplace=False)\n", | ||||
|       "      (norm2): SuperLayerNorm1D(shape=Categorical(candidates=[24, 36, 48], default_index=None), eps=1e-06, elementwise_affine=True)\n", | ||||
|       "      (mlp): SuperMLPv2(\n", | ||||
|       "        in_features=Categorical(candidates=[24, 36, 48], default_index=None), hidden_multiplier=Categorical(candidates=[1, 2, 4], default_index=None), out_features=Categorical(candidates=[24, 36, 48], default_index=None), drop=None, fc1 -> act -> drop -> fc2 -> drop,\n", | ||||
|       "        (_params): ParameterDict(\n", | ||||
|       "            (fc1_super_weight): Parameter containing: [torch.FloatTensor of size 192x48]\n", | ||||
|       "            (fc1_super_bias): Parameter containing: [torch.FloatTensor of size 192]\n", | ||||
|       "            (fc2_super_weight): Parameter containing: [torch.FloatTensor of size 48x192]\n", | ||||
|       "            (fc2_super_bias): Parameter containing: [torch.FloatTensor of size 48]\n", | ||||
|       "        )\n", | ||||
|       "        (act): GELU()\n", | ||||
|       "        (drop): Dropout(p=0.0, inplace=False)\n", | ||||
|       "      )\n", | ||||
|       "    )\n", | ||||
|       "  )\n", | ||||
|       "  (2): SuperSequential(\n", | ||||
|       "    (0): SuperLinear(in_features=Categorical(candidates=[24, 36, 48], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=True)\n", | ||||
|       "    (1): SuperTransformerEncoderLayer(\n", | ||||
|       "      (norm1): SuperLayerNorm1D(shape=Categorical(candidates=[36, 72, 100], default_index=None), eps=1e-06, elementwise_affine=True)\n", | ||||
|       "      (mha): SuperSelfAttention(\n", | ||||
|       "        input_dim=Categorical(candidates=[36, 72, 100], default_index=None), proj_dim=Categorical(candidates=[36, 72, 100], default_index=None), num_heads=Categorical(candidates=[2, 4, 6], default_index=None), mask=False, infinity=1000000000.0\n", | ||||
|       "        (q_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "        (k_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "        (v_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "        (attn_drop): SuperDrop(p=0.0, dims=[-1, -1, -1, -1], recover=True)\n", | ||||
|       "      )\n", | ||||
|       "      (drop): Dropout(p=0.0, inplace=False)\n", | ||||
|       "      (norm2): SuperLayerNorm1D(shape=Categorical(candidates=[36, 72, 100], default_index=None), eps=1e-06, elementwise_affine=True)\n", | ||||
|       "      (mlp): SuperMLPv2(\n", | ||||
|       "        in_features=Categorical(candidates=[36, 72, 100], default_index=None), hidden_multiplier=Categorical(candidates=[1, 2, 4], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), drop=None, fc1 -> act -> drop -> fc2 -> drop,\n", | ||||
|       "        (_params): ParameterDict(\n", | ||||
|       "            (fc1_super_weight): Parameter containing: [torch.FloatTensor of size 400x100]\n", | ||||
|       "            (fc1_super_bias): Parameter containing: [torch.FloatTensor of size 400]\n", | ||||
|       "            (fc2_super_weight): Parameter containing: [torch.FloatTensor of size 100x400]\n", | ||||
|       "            (fc2_super_bias): Parameter containing: [torch.FloatTensor of size 100]\n", | ||||
|       "        )\n", | ||||
|       "        (act): GELU()\n", | ||||
|       "        (drop): Dropout(p=0.0, inplace=False)\n", | ||||
|       "      )\n", | ||||
|       "    )\n", | ||||
|       "  )\n", | ||||
|       ")\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "out1_dim = spaces.Categorical(12, 24, 36)\n", | ||||
|     "out2_dim = spaces.Categorical(24, 36, 48)\n", | ||||
|     "out3_dim = spaces.Categorical(36, 72, 100)\n", | ||||
|     "layer1 = _create_stel(input_dim, out1_dim, order)\n", | ||||
|     "layer2 = _create_stel(out1_dim, out2_dim, order)\n", | ||||
|     "layer3 = _create_stel(out2_dim, out3_dim, order)\n", | ||||
|     "model = super_core.SuperSequential(layer1, layer2, layer3)\n", | ||||
|     "print(model)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "id": "4fd53a7c", | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "> \u001b[0;32m/Users/xuanyidong/anaconda3/lib/python3.8/site-packages/xautodl-0.9.9-py3.8.egg/xautodl/xlayers/super_transformer.py\u001b[0m(116)\u001b[0;36mforward_raw\u001b[0;34m()\u001b[0m\n", | ||||
|       "\u001b[0;32m    114 \u001b[0;31m              \u001b[0;32mimport\u001b[0m \u001b[0mpdb\u001b[0m\u001b[0;34m;\u001b[0m \u001b[0mpdb\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_trace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | ||||
|       "\u001b[0m\u001b[0;32m    115 \u001b[0;31m            \u001b[0;31m# feed-forward layer -- MLP\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | ||||
|       "\u001b[0m\u001b[0;32m--> 116 \u001b[0;31m            \u001b[0my\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnorm2\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | ||||
|       "\u001b[0m\u001b[0;32m    117 \u001b[0;31m            \u001b[0mouts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mx\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmlp\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0my\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | ||||
|       "\u001b[0m\u001b[0;32m    118 \u001b[0;31m        \u001b[0;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_order\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mLayerOrder\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mPostNorm\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | ||||
|       "\u001b[0m\n", | ||||
|       "ipdb> print(self)\n", | ||||
|       "SuperTransformerEncoderLayer(\n", | ||||
|       "  (norm1): SuperLayerNorm1D(shape=Categorical(candidates=[36, 72, 100], default_index=None), eps=1e-06, elementwise_affine=True)\n", | ||||
|       "  (mha): SuperSelfAttention(\n", | ||||
|       "    input_dim=Categorical(candidates=[36, 72, 100], default_index=None), proj_dim=Categorical(candidates=[36, 72, 100], default_index=None), num_heads=Categorical(candidates=[2, 4, 6], default_index=None), mask=False, infinity=1000000000.0\n", | ||||
|       "    (q_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "    (k_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "    (v_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "    (attn_drop): SuperDrop(p=0.0, dims=[-1, -1, -1, -1], recover=True)\n", | ||||
|       "  )\n", | ||||
|       "  (drop): Dropout(p=0.0, inplace=False)\n", | ||||
|       "  (norm2): SuperLayerNorm1D(shape=Categorical(candidates=[36, 72, 100], default_index=None), eps=1e-06, elementwise_affine=True)\n", | ||||
|       "  (mlp): SuperMLPv2(\n", | ||||
|       "    in_features=Categorical(candidates=[36, 72, 100], default_index=None), hidden_multiplier=Categorical(candidates=[1, 2, 4], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), drop=None, fc1 -> act -> drop -> fc2 -> drop,\n", | ||||
|       "    (_params): ParameterDict(\n", | ||||
|       "        (fc1_super_weight): Parameter containing: [torch.FloatTensor of size 400x100]\n", | ||||
|       "        (fc1_super_bias): Parameter containing: [torch.FloatTensor of size 400]\n", | ||||
|       "        (fc2_super_weight): Parameter containing: [torch.FloatTensor of size 100x400]\n", | ||||
|       "        (fc2_super_bias): Parameter containing: [torch.FloatTensor of size 100]\n", | ||||
|       "    )\n", | ||||
|       "    (act): GELU()\n", | ||||
|       "    (drop): Dropout(p=0.0, inplace=False)\n", | ||||
|       "  )\n", | ||||
|       ")\n", | ||||
|       "ipdb> print(inputs.shape)\n", | ||||
|       "torch.Size([1, 4, 100])\n", | ||||
|       "ipdb> print(x.shape)\n", | ||||
|       "torch.Size([1, 4, 96])\n", | ||||
|       "ipdb> print(self.mha)\n", | ||||
|       "SuperSelfAttention(\n", | ||||
|       "  input_dim=Categorical(candidates=[36, 72, 100], default_index=None), proj_dim=Categorical(candidates=[36, 72, 100], default_index=None), num_heads=Categorical(candidates=[2, 4, 6], default_index=None), mask=False, infinity=1000000000.0\n", | ||||
|       "  (q_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "  (k_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "  (v_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "  (attn_drop): SuperDrop(p=0.0, dims=[-1, -1, -1, -1], recover=True)\n", | ||||
|       ")\n", | ||||
|       "ipdb> print(self.mha.candidate)\n", | ||||
|       "*** AttributeError: 'SuperSelfAttention' object has no attribute 'candidate'\n", | ||||
|       "ipdb> print(self.mha.abstract_candidate)\n", | ||||
|       "*** AttributeError: 'SuperSelfAttention' object has no attribute 'abstract_candidate'\n", | ||||
|       "ipdb> print(self.mha._abstract_child)\n", | ||||
|       "None\n", | ||||
|       "ipdb> print(self.abstract_child)\n", | ||||
|       "None\n", | ||||
|       "ipdb> print(self.abstract_child.abstract_child)\n", | ||||
|       "*** AttributeError: 'NoneType' object has no attribute 'abstract_child'\n", | ||||
|       "ipdb> print(self.mha)\n", | ||||
|       "SuperSelfAttention(\n", | ||||
|       "  input_dim=Categorical(candidates=[36, 72, 100], default_index=None), proj_dim=Categorical(candidates=[36, 72, 100], default_index=None), num_heads=Categorical(candidates=[2, 4, 6], default_index=None), mask=False, infinity=1000000000.0\n", | ||||
|       "  (q_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "  (k_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "  (v_fc): SuperLinear(in_features=Categorical(candidates=[36, 72, 100], default_index=None), out_features=Categorical(candidates=[36, 72, 100], default_index=None), bias=False)\n", | ||||
|       "  (attn_drop): SuperDrop(p=0.0, dims=[-1, -1, -1, -1], recover=True)\n", | ||||
|       ")\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "inputs = torch.rand(batch, seq_dim, input_dim)\n", | ||||
|     "outputs = model(inputs)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "id": "05332b98", | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "abstract_space = model.abstract_search_space\n", | ||||
|     "abstract_space.clean_last()\n", | ||||
|     "abstract_child = abstract_space.random(reuse_last=True)\n", | ||||
|     "# print(\"The abstract child program is:\\n{:}\".format(abstract_child))\n", | ||||
|     "model.enable_candidate()\n", | ||||
|     "model.set_super_run_type(super_core.SuperRunMode.Candidate)\n", | ||||
|     "model.apply_candidate(abstract_child)\n", | ||||
|     "outputs = model(inputs)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "id": "3289f938", | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "print(outputs.shape)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "id": "36951cdf", | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [] | ||||
|   } | ||||
|  ], | ||||
|  "metadata": { | ||||
|   "kernelspec": { | ||||
|    "display_name": "Python 3", | ||||
|    "language": "python", | ||||
|    "name": "python3" | ||||
|   }, | ||||
|   "language_info": { | ||||
|    "codemirror_mode": { | ||||
|     "name": "ipython", | ||||
|     "version": 3 | ||||
|    }, | ||||
|    "file_extension": ".py", | ||||
|    "mimetype": "text/x-python", | ||||
|    "name": "python", | ||||
|    "nbconvert_exporter": "python", | ||||
|    "pygments_lexer": "ipython3", | ||||
|    "version": "3.8.8" | ||||
|   } | ||||
|  }, | ||||
|  "nbformat": 4, | ||||
|  "nbformat_minor": 5 | ||||
| } | ||||