Add visualization code for Q
@@ -1,3 +1,9 @@
+#####################################################
+# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019.01 #
+#####################################################
+# This directory contains some ad-hoc functions, classes, etc.
+# It will be re-formulated in the future.
+#####################################################
 from .evaluation_utils import obtain_accuracy
 from .gpu_manager import GPUManager
 from .flop_benchmark import get_model_infos, count_parameters, count_parameters_in_MB
@@ -76,10 +76,12 @@ def rotate2affine(degree):
 
 # shape is a tuple [H, W]
 def normalize_points(shape, points):
-    assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(shape) == 2, "invalid shape : {:}".format(
-        shape
-    )
-    assert isinstance(points, torch.Tensor) and (points.shape[0] == 2), "points are wrong : {:}".format(points.shape)
+    assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(
+        shape
+    ) == 2, "invalid shape : {:}".format(shape)
+    assert isinstance(points, torch.Tensor) and (
+        points.shape[0] == 2
+    ), "points are wrong : {:}".format(points.shape)
     (H, W), points = shape, points.clone()
     points[0, :] = normalize_L(points[0, :], W)
     points[1, :] = normalize_L(points[1, :], H)
@@ -88,10 +90,12 @@ def normalize_points(shape, points):
 
 # shape is a tuple [H, W]
 def normalize_points_batch(shape, points):
-    assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(shape) == 2, "invalid shape : {:}".format(
-        shape
-    )
-    assert isinstance(points, torch.Tensor) and (points.size(-1) == 2), "points are wrong : {:}".format(points.shape)
+    assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(
+        shape
+    ) == 2, "invalid shape : {:}".format(shape)
+    assert isinstance(points, torch.Tensor) and (
+        points.size(-1) == 2
+    ), "points are wrong : {:}".format(points.shape)
     (H, W), points = shape, points.clone()
     x = normalize_L(points[..., 0], W)
     y = normalize_L(points[..., 1], H)
@@ -100,10 +104,12 @@ def normalize_points_batch(shape, points):
 
 # shape is a tuple [H, W]
 def denormalize_points(shape, points):
-    assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(shape) == 2, "invalid shape : {:}".format(
-        shape
-    )
-    assert isinstance(points, torch.Tensor) and (points.shape[0] == 2), "points are wrong : {:}".format(points.shape)
+    assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(
+        shape
+    ) == 2, "invalid shape : {:}".format(shape)
+    assert isinstance(points, torch.Tensor) and (
+        points.shape[0] == 2
+    ), "points are wrong : {:}".format(points.shape)
     (H, W), points = shape, points.clone()
     points[0, :] = denormalize_L(points[0, :], W)
     points[1, :] = denormalize_L(points[1, :], H)
@@ -112,10 +118,12 @@ def denormalize_points(shape, points):
 
 # shape is a tuple [H, W]
 def denormalize_points_batch(shape, points):
-    assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(shape) == 2, "invalid shape : {:}".format(
-        shape
-    )
-    assert isinstance(points, torch.Tensor) and (points.shape[-1] == 2), "points are wrong : {:}".format(points.shape)
+    assert (isinstance(shape, tuple) or isinstance(shape, list)) and len(
+        shape
+    ) == 2, "invalid shape : {:}".format(shape)
+    assert isinstance(points, torch.Tensor) and (
+        points.shape[-1] == 2
+    ), "points are wrong : {:}".format(points.shape)
     (H, W), points = shape, points.clone()
     x = denormalize_L(points[..., 0], W)
     y = denormalize_L(points[..., 1], H)
@@ -145,5 +153,7 @@ def affine2image(image, theta, shape):
     theta = theta[:2, :].unsqueeze(0)
     grid_size = torch.Size([1, C, shape[0], shape[1]])
     grid = F.affine_grid(theta, grid_size)
-    affI = F.grid_sample(image.unsqueeze(0), grid, mode="bilinear", padding_mode="border")
+    affI = F.grid_sample(
+        image.unsqueeze(0), grid, mode="bilinear", padding_mode="border"
+    )
     return affI.squeeze(0)
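Usage note for the helpers above: normalize_points maps pixel coordinates of an H x W image into the [-1, 1] range that F.affine_grid / F.grid_sample expect, and denormalize_points inverts the mapping. A minimal round-trip sketch, assuming normalize_L(x, L) = x / (L - 1) * 2 - 1 (its definition sits outside this diff):

import torch

def normalize_L(x, L):  # assumed definition: [0, L-1] -> [-1, 1]
    return x / (L - 1) * 2 - 1

def denormalize_L(x, L):  # inverse mapping: [-1, 1] -> [0, L-1]
    return (x + 1) / 2 * (L - 1)

H, W = 64, 128
points = torch.tensor([[0.0, 127.0], [10.0, 63.0]])  # 2 x num_pts; row 0 = x, row 1 = y
norm = points.clone()
norm[0, :] = normalize_L(norm[0, :], W)
norm[1, :] = normalize_L(norm[1, :], H)
back = norm.clone()
back[0, :] = denormalize_L(back[0, :], W)
back[1, :] = denormalize_L(back[1, :], H)
assert torch.allclose(points, back)  # round trip recovers the pixel coordinates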
@@ -48,7 +48,11 @@ def get_model_infos(model, shape):
     if hasattr(model, "auxiliary_param"):
         aux_params = count_parameters_in_MB(model.auxiliary_param())
         print("The auxiliary params of this model is : {:}".format(aux_params))
-        print("We remove the auxiliary params from the total params ({:}) when counting".format(Param))
+        print(
+            "We remove the auxiliary params from the total params ({:}) when counting".format(
+                Param
+            )
+        )
         Param = Param - aux_params
 
     # print_log('FLOPs : {:} MB'.format(FLOPs), log)
@@ -92,7 +96,9 @@ def pool_flops_counter_hook(pool_module, inputs, output):
     out_C, output_height, output_width = output.shape[1:]
     assert out_C == inputs[0].size(1), "{:} vs. {:}".format(out_C, inputs[0].size())
 
-    overall_flops = batch_size * out_C * output_height * output_width * kernel_size * kernel_size
+    overall_flops = (
+        batch_size * out_C * output_height * output_width * kernel_size * kernel_size
+    )
     pool_module.__flops__ += overall_flops
 
 
@@ -104,7 +110,9 @@ def self_calculate_flops_counter_hook(self_module, inputs, output):
 def fc_flops_counter_hook(fc_module, inputs, output):
     batch_size = inputs[0].size(0)
     xin, xout = fc_module.in_features, fc_module.out_features
-    assert xin == inputs[0].size(1) and xout == output.size(1), "IO=({:}, {:})".format(xin, xout)
+    assert xin == inputs[0].size(1) and xout == output.size(1), "IO=({:}, {:})".format(
+        xin, xout
+    )
     overall_flops = batch_size * xin * xout
     if fc_module.bias is not None:
         overall_flops += batch_size * xout
@@ -136,7 +144,9 @@ def conv2d_flops_counter_hook(conv_module, inputs, output):
     in_channels = conv_module.in_channels
     out_channels = conv_module.out_channels
     groups = conv_module.groups
-    conv_per_position_flops = kernel_height * kernel_width * in_channels * out_channels / groups
+    conv_per_position_flops = (
+        kernel_height * kernel_width * in_channels * out_channels / groups
+    )
 
     active_elements_count = batch_size * output_height * output_width
     overall_flops = conv_per_position_flops * active_elements_count
@@ -184,7 +194,9 @@ def add_flops_counter_hook_function(module):
         if not hasattr(module, "__flops_handle__"):
             handle = module.register_forward_hook(fc_flops_counter_hook)
             module.__flops_handle__ = handle
-    elif isinstance(module, torch.nn.AvgPool2d) or isinstance(module, torch.nn.MaxPool2d):
+    elif isinstance(module, torch.nn.AvgPool2d) or isinstance(
+        module, torch.nn.MaxPool2d
+    ):
         if not hasattr(module, "__flops_handle__"):
             handle = module.register_forward_hook(pool_flops_counter_hook)
             module.__flops_handle__ = handle
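The hooks above follow the standard forward-hook FLOP counter pattern: attach a hook per supported module type, run one dummy forward pass, and sum the per-module __flops__ attributes; get_model_infos(model, shape) wraps this and also reports parameter counts. A self-contained sketch of the same pattern (the hook and variable names below are illustrative, not this file's API):

import torch
import torch.nn as nn

def conv2d_flops_hook(module, inputs, output):
    # multiply-accumulates per output position, times the number of output positions
    batch, _, out_h, out_w = output.shape
    kh, kw = module.kernel_size
    per_position = kh * kw * module.in_channels * module.out_channels / module.groups
    module.__flops__ = per_position * batch * out_h * out_w

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
handles = [
    m.register_forward_hook(conv2d_flops_hook)
    for m in model.modules()
    if isinstance(m, nn.Conv2d)
]
with torch.no_grad():
    model(torch.zeros(1, 3, 32, 32))
total = sum(getattr(m, "__flops__", 0) for m in model.modules())
print(total)  # 3*3*3*8 * 32*32 = 221184 multiply-adds
for h in handles:
    h.remove()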
@@ -2,7 +2,15 @@ import os
 
 
 class GPUManager:
-    queries = ("index", "gpu_name", "memory.free", "memory.used", "memory.total", "power.draw", "power.limit")
+    queries = (
+        "index",
+        "gpu_name",
+        "memory.free",
+        "memory.used",
+        "memory.total",
+        "power.draw",
+        "power.limit",
+    )
 
     def __init__(self):
         all_gpus = self.query_gpu(False)
@@ -28,7 +36,9 @@ class GPUManager:
                 find = False
                 for gpu in all_gpus:
                     if gpu["index"] == CUDA_VISIBLE_DEVICE:
-                        assert not find, "Duplicate cuda device index : {}".format(CUDA_VISIBLE_DEVICE)
+                        assert not find, "Duplicate cuda device index : {}".format(
+                            CUDA_VISIBLE_DEVICE
+                        )
                         find = True
                         selected_gpus.append(gpu.copy())
                         selected_gpus[-1]["index"] = "{}".format(idx)
@@ -52,7 +62,9 @@ class GPUManager:
 
     def select_by_memory(self, numbers=1):
         all_gpus = self.query_gpu(False)
-        assert numbers <= len(all_gpus), "Require {} gpus more than you have".format(numbers)
+        assert numbers <= len(all_gpus), "Require {} gpus more than you have".format(
+            numbers
+        )
         alls = []
         for idx, gpu in enumerate(all_gpus):
             free_memory = gpu["memory.free"]
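A usage sketch for GPUManager, based only on the methods visible here (query_gpu presumably shells out to nvidia-smi for the fields listed in queries, so this needs a machine with NVIDIA drivers, and the return value of select_by_memory is an assumption):

manager = GPUManager()
gpus = manager.select_by_memory(2)  # assumed: indices of the two GPUs with the most free memory
print(gpus)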
@@ -4,7 +4,7 @@ import numpy as np
 from copy import deepcopy
 import torch.nn as nn
 
-# from utils  import obtain_accuracy
+# modules in AutoDL
 from models import CellStructure
 from log_utils import time_string
 
@@ -56,11 +56,20 @@ def evaluate_one_shot(model, xloader, api, cal_mode, seed=111):
             correct = (preds == targets.cuda()).float()
             accuracies.append(correct.mean().item())
             if idx != 0 and (idx % 500 == 0 or idx + 1 == len(archs)):
-                cor_accs_valid = np.corrcoef(accuracies, gt_accs_10_valid[: idx + 1])[0, 1]
-                cor_accs_test = np.corrcoef(accuracies, gt_accs_10_test[: idx + 1])[0, 1]
+                cor_accs_valid = np.corrcoef(accuracies, gt_accs_10_valid[: idx + 1])[
+                    0, 1
+                ]
+                cor_accs_test = np.corrcoef(accuracies, gt_accs_10_test[: idx + 1])[
+                    0, 1
+                ]
                 print(
                     "{:} {:05d}/{:05d} mode={:5s}, correlation : accs={:.5f} for CIFAR-10 valid, {:.5f} for CIFAR-10 test.".format(
-                        time_string(), idx, len(archs), "Train" if cal_mode else "Eval", cor_accs_valid, cor_accs_test
+                        time_string(),
+                        idx,
+                        len(archs),
+                        "Train" if cal_mode else "Eval",
+                        cor_accs_valid,
+                        cor_accs_test,
                     )
                 )
     model.load_state_dict(weights)
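evaluate_one_shot tracks how well one-shot accuracies rank architectures by computing the Pearson correlation against the NAS-Bench ground truth: np.corrcoef(a, b) returns a 2x2 correlation matrix, and its [0, 1] entry is the correlation between the two sequences. For example:

import numpy as np

one_shot_accs = [0.61, 0.72, 0.55, 0.80]
ground_truth_accs = [0.63, 0.70, 0.58, 0.82]
print("{:.5f}".format(np.corrcoef(one_shot_accs, ground_truth_accs)[0, 1]))  # close to 1.0: strong ranking signal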
lib/utils/qlib_utils.py (new file, 101 lines)
@@ -0,0 +1,101 @@
import numpy as np
from typing import List, Text
from collections import defaultdict, OrderedDict


class QResult:
    """A class to maintain the results of a qlib experiment."""

    def __init__(self, name):
        self._result = defaultdict(list)
        self._name = name
        self._recorder_paths = []

    def append(self, key, value):
        self._result[key].append(value)

    def append_path(self, xpath):
        self._recorder_paths.append(xpath)

    @property
    def name(self):
        return self._name

    @property
    def paths(self):
        return self._recorder_paths

    @property
    def result(self):
        return self._result

    @property
    def keys(self):
        return list(self._result.keys())

    def __len__(self):
        return len(self._result)

    def __repr__(self):
        return "{name}({xname}, {num} metrics)".format(
            name=self.__class__.__name__, xname=self.name, num=len(self.result)
        )

    def __getitem__(self, key):
        if key not in self._result:
            raise ValueError(
                "Invalid key {:}, please use one of {:}".format(key, self.keys)
            )
        values = self._result[key]
        return float(np.mean(values))

    def update(self, metrics, filter_keys=None):
        for key, value in metrics.items():
            if filter_keys is not None and key in filter_keys:
                key = filter_keys[key]
            elif filter_keys is not None:
                continue
            self.append(key, value)

    @staticmethod
    def full_str(xstr, space):
        xformat = "{:" + str(space) + "s}"
        return xformat.format(str(xstr))

    @staticmethod
    def merge_dict(dict_list):
        new_dict = dict()
        for xkey in dict_list[0].keys():
            values = [x for xdict in dict_list for x in xdict[xkey]]
            new_dict[xkey] = values
        return new_dict

    def info(
        self,
        keys: List[Text],
        separate: Text = "& ",
        space: int = 20,
        verbose: bool = True,
    ):
        available_keys = []
        for key in keys:
            if key not in self.result:
                print("Invalid key [{:}], it will be skipped.".format(key))
            else:
                available_keys.append(key)
        head_str = separate.join([self.full_str(x, space) for x in available_keys])
        values = []
        for key in available_keys:
            if "IR" in key:
                current_values = [x * 100 for x in self._result[key]]
            else:
                current_values = self._result[key]
            mean = np.mean(current_values)
            std = np.std(current_values)
            # values.append(r"{:.4f} $\pm$ {:.4f}".format(mean, std))
            values.append(r"{:.2f} $\pm$ {:.2f}".format(mean, std))
        value_str = separate.join([self.full_str(x, space) for x in values])
        if verbose:
            print(head_str)
            print(value_str)
        return head_str, value_str
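A usage sketch for the new QResult class (the metric names below are illustrative, not fixed by the class): accumulate one metrics dict per run via update, where filter_keys both whitelists and renames keys, then query means via indexing or print an aligned table via info. Note that info scales any key containing "IR" by 100 before averaging:

result = QResult("my-exp")
for seed in range(3):
    metrics = {
        "excess_return.annualized_return": 0.10 + 0.01 * seed,
        "excess_return.information_ratio": 1.2,
        "ignored.metric": 0.0,  # dropped: not listed in filter_keys
    }
    result.update(
        metrics,
        filter_keys={
            "excess_return.annualized_return": "AR",
            "excess_return.information_ratio": "IR",
        },
    )
print(result["AR"])  # ~0.11, the mean over the three runs
head_str, value_str = result.info(["AR", "IR"])  # prints aligned header and mean $\pm$ std rows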
@@ -8,10 +8,14 @@ def split_str2indexes(string: str, max_check: int, length_limit=5):
         if len(srange) != 2:
             raise ValueError("invalid srange : {:}".format(srange))
         if length_limit is not None:
-            assert len(srange[0]) == len(srange[1]) == length_limit, "invalid srange : {:}".format(srange)
+            assert (
+                len(srange[0]) == len(srange[1]) == length_limit
+            ), "invalid srange : {:}".format(srange)
         srange = (int(srange[0]), int(srange[1]))
         if not (0 <= srange[0] <= srange[1] < max_check):
-            raise ValueError("{:} vs {:} vs {:}".format(srange[0], srange[1], max_check))
+            raise ValueError(
+                "{:} vs {:} vs {:}".format(srange[0], srange[1], max_check)
+            )
         for i in range(srange[0], srange[1] + 1):
             indexes.add(i)
     return indexes
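A hedged usage note: the splitting of string into each srange happens just above this hunk and is not shown, but the validation implies comma-separated, zero-padded "start-end" ranges with length_limit digits per bound, each bound below max_check. Under that assumption:

indexes = split_str2indexes("00000-00004,00010-00012", 100, 5)
# expected: {0, 1, 2, 3, 4, 10, 11, 12}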
@@ -21,7 +21,11 @@ def get_conv2D_Wmats(tensor: np.ndarray) -> List[np.ndarray]:
     """
     mats = []
    N, M, imax, jmax = tensor.shape
-    assert N + M >= imax + jmax, "invalid tensor shape detected: {}x{} (NxM), {}x{} (i,j)".format(N, M, imax, jmax)
+    assert (
+        N + M >= imax + jmax
+    ), "invalid tensor shape detected: {}x{} (NxM), {}x{} (i,j)".format(
+        N, M, imax, jmax
+    )
     for i in range(imax):
         for j in range(jmax):
             w = tensor[:, :, i, j]
@@ -58,7 +62,17 @@ def glorot_norm_fix(w, n, m, rf_size):
     return w
 
 
-def analyze_weights(weights, min_size, max_size, alphas, lognorms, spectralnorms, softranks, normalize, glorot_fix):
+def analyze_weights(
+    weights,
+    min_size,
+    max_size,
+    alphas,
+    lognorms,
+    spectralnorms,
+    softranks,
+    normalize,
+    glorot_fix,
+):
     results = OrderedDict()
     count = len(weights)
     if count == 0:
@@ -94,12 +108,16 @@ def analyze_weights(weights, min_size, max_size, alphas, lognorms, spectralnorms
             lambda0 = None
 
         if M < min_size:
-            summary = "Weight matrix {}/{} ({},{}): Skipping: too small (<{})".format(i + 1, count, M, N, min_size)
+            summary = "Weight matrix {}/{} ({},{}): Skipping: too small (<{})".format(
+                i + 1, count, M, N, min_size
+            )
             cur_res["summary"] = summary
             continue
         elif max_size > 0 and M > max_size:
-            summary = "Weight matrix {}/{} ({},{}): Skipping: too big (testing) (>{})".format(
-                i + 1, count, M, N, max_size
-            )
+            summary = (
+                "Weight matrix {}/{} ({},{}): Skipping: too big (testing) (>{})".format(
+                    i + 1, count, M, N, max_size
+                )
+            )
             cur_res["summary"] = summary
             continue
@@ -153,7 +171,9 @@ def analyze_weights(weights, min_size, max_size, alphas, lognorms, spectralnorms
             cur_res["lognormX"] = lognormX
 
             summary.append(
-                "Weight matrix {}/{} ({},{}): LogNorm: {} ; LogNormX: {}".format(i + 1, count, M, N, lognorm, lognormX)
+                "Weight matrix {}/{} ({},{}): LogNorm: {} ; LogNormX: {}".format(
+                    i + 1, count, M, N, lognorm, lognormX
+                )
             )
 
             if softranks:
@@ -163,8 +183,10 @@ def analyze_weights(weights, min_size, max_size, alphas, lognorms, spectralnorms
                 cur_res["softrank"] = softrank
                 cur_res["softranklog"] = softranklog
                 cur_res["softranklogratio"] = softranklogratio
-                summary += "{}. Softrank: {}. Softrank log: {}. Softrank log ratio: {}".format(
-                    summary, softrank, softranklog, softranklogratio
-                )
+                summary += (
+                    "{}. Softrank: {}. Softrank log: {}. Softrank log ratio: {}".format(
+                        summary, softrank, softranklog, softranklogratio
+                    )
+                )
         cur_res["summary"] = "\n".join(summary)
     return results
@@ -209,7 +231,17 @@ def compute_details(results):
         metrics_stats.append("{}_compound_avg".format(metric))
 
     columns = (
-        ["layer_id", "layer_type", "N", "M", "layer_count", "slice", "slice_count", "level", "comment"]
+        [
+            "layer_id",
+            "layer_type",
+            "N",
+            "M",
+            "layer_count",
+            "slice",
+            "slice_count",
+            "level",
+            "comment",
+        ]
         + [*metrics]
         + metrics_stats
     )
@@ -351,7 +383,15 @@ def analyze(
         else:
             weights = get_conv2D_Wmats(module.weight.cpu().detach().numpy())
         results = analyze_weights(
-            weights, min_size, max_size, alphas, lognorms, spectralnorms, softranks, normalize, glorot_fix
+            weights,
+            min_size,
+            max_size,
+            alphas,
+            lognorms,
+            spectralnorms,
+            softranks,
+            normalize,
+            glorot_fix,
         )
         results["id"] = index
         results["type"] = type(module)
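For reference, get_conv2D_Wmats (first hunk of this file) slices a conv kernel of shape (N, M, imax, jmax) into imax * jmax dense N x M matrices, one per spatial offset, so each slice can be analyzed like a fully connected layer (spectral norm, power-law alpha, log norms). A minimal illustration of that slicing:

import numpy as np

tensor = np.random.randn(64, 32, 3, 3)  # (N, M, imax, jmax), e.g. a Conv2d weight
N, M, imax, jmax = tensor.shape
mats = [tensor[:, :, i, j] for i in range(imax) for j in range(jmax)]
assert len(mats) == imax * jmax and mats[0].shape == (N, M)
sigma_max = np.linalg.norm(mats[0], 2)  # spectral norm = largest singular value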