diff --git a/models/discriminator.py b/models/discriminator.py
index 0f8b7be..6da1d51 100644
--- a/models/discriminator.py
+++ b/models/discriminator.py
@@ -15,8 +15,8 @@ def conv3d(in_channel:int, out_channel:int):
         nn.Module: Conv3d Module
     """
     return nn.Sequential(
-        nn.Conv3d(channel[0], channel[1], kernel_size=4, stride=2, padding=1, bias=False),
-        nn.BatchNorm3d(channel[1]),
+        nn.Conv3d(in_channel, out_channel, kernel_size=4, stride=2, padding=1, bias=False),
+        nn.BatchNorm3d(out_channel),
         nn.LeakyReLU(0.2, inplace=True),
     )
 
diff --git a/requirements.txt b/requirements.txt
index d7cc8d8..f162945 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
-fancy_einsum
 torch
 torchvision
 tqdm
diff --git a/utils/utils.py b/utils/utils.py
index c245ae4..9d5bd5a 100644
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -55,17 +55,17 @@ def preprocess(images, sizex=None, sizey=None):
 
 
 
-def downsample(mat:np.ndarray, down_sample=64)->np.ndarray:
+def downsample(mat:np.ndarray, new_size=64)->np.ndarray:
     """
     Args:
         mat (np.ndarray): 3d image
-        down_sample (int, optional): the new number of voxels cubes. Defaults to 64.
+        new_size (int, optional): target number of voxels per side. Defaults to 64.
 
     Returns:
         np.ndarray: 3d images after downsampling
     """
-    lost_dim = int(mat.shape[2]/down_sample)
-    return mat.reshape((-1, down_sample, lost_dim, down_sample, lost_dim, down_sample, lost_dim)).mean(
+    lost_dim = int(mat.shape[2]/new_size)
+    return mat.reshape((-1, new_size, lost_dim, new_size, lost_dim, new_size, lost_dim)).mean(
         axis=(2, 4, 6)
     )
 
@@ -97,4 +97,4 @@ def load_data(data_path:str, down_sample=None):
         img.append(cv2.imread(folder_path + "/" + format(i, "03d") + ".png")) # pylint: disable=no-member
         img2d.append(preprocess(np.array(img)))
         img3d.append(mat_data)
-    return img2d, img3d
\ No newline at end of file
+    return img2d, img3d
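
Reviewer note on the `conv3d` hunk: the old body referenced a `channel` tuple that is not among the function's parameters, so any call would raise a `NameError` unless a module-level `channel` happened to be in scope. A minimal sketch of the patched helper in use follows; the batch size, channel counts, and 64^3 input are illustrative assumptions, not values taken from this repository.

```python
# Sketch exercising the patched conv3d helper; all shapes are assumptions.
import torch
import torch.nn as nn

def conv3d(in_channel: int, out_channel: int) -> nn.Module:
    # Patched version: the parameters flow through, instead of the
    # out-of-scope `channel` tuple the old body referenced.
    return nn.Sequential(
        nn.Conv3d(in_channel, out_channel, kernel_size=4, stride=2, padding=1, bias=False),
        nn.BatchNorm3d(out_channel),
        nn.LeakyReLU(0.2, inplace=True),
    )

block = conv3d(1, 32)
x = torch.randn(2, 1, 64, 64, 64)       # (batch, channels, D, H, W)
y = block(x)
assert y.shape == (2, 32, 32, 32, 32)   # kernel 4 / stride 2 / pad 1 halves each spatial dim
```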
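
The `downsample` change is a behavior-preserving rename; the reshape/mean pattern it keeps is non-overlapping block averaging: a cube of side `mat.shape[2]` is split into `new_size**3` blocks of side `lost_dim`, and each block is reduced to its mean. A quick standalone check is below; the 128^3 test volume is a hypothetical size chosen so that `lost_dim == 2`. Note the pattern assumes the side length is an exact multiple of `new_size`, otherwise the reshape raises a `ValueError`.

```python
# Standalone check of the block-mean behavior of utils.downsample;
# the 128^3 volume is a hypothetical input, not data from the repo.
import numpy as np

def downsample(mat: np.ndarray, new_size: int = 64) -> np.ndarray:
    # Split each spatial axis into (new_size, lost_dim) pairs, then
    # average over the three lost_dim axes.
    lost_dim = int(mat.shape[2] / new_size)
    return mat.reshape(
        (-1, new_size, lost_dim, new_size, lost_dim, new_size, lost_dim)
    ).mean(axis=(2, 4, 6))

vol = np.random.rand(1, 128, 128, 128)
out = downsample(vol, new_size=64)
assert out.shape == (1, 64, 64, 64)
# Each output voxel equals the mean of the matching 2x2x2 input block.
assert np.isclose(out[0, 0, 0, 0], vol[0, :2, :2, :2].mean())
```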