Hello,
I am stuck on the Week 4 assignment (C1W4A, Build a Conditional GAN) at the last step I need for full credit.
Can you point me in the right direction, please?
I know it is a dimension mismatch.
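For context, here is a minimal sketch of how I understand the shapes involved (assuming MNIST: 1 image channel, 10 classes, batch size 128, with combine_vectors concatenating along the channel dimension as in the notebook; the stand-in tensors below are mine, not the notebook's):

import torch

batch_size, n_classes = 128, 10
mnist_shape = (1, 28, 28)  # (channels, height, width)

# Stand-ins for the generator output and the broadcast one-hot labels
fake = torch.randn(batch_size, *mnist_shape)
image_one_hot_labels = torch.zeros(batch_size, n_classes, *mnist_shape[1:])

# combine_vectors concatenates along dim=1, i.e. the channel dimension
fake_image_and_labels = torch.cat([fake, image_one_hot_labels], dim=1)
print(fake_image_and_labels.shape)  # torch.Size([128, 11, 28, 28])

That [128, 11, 28, 28] tensor is what reaches the discriminator in the traceback below.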
RuntimeError Traceback (most recent call last)
Input In [29], in <cell line: 16>()
58 fake_image_and_labels = combine_vectors(fake.detach(), image_one_hot_labels)
59 real_image_and_labels = combine_vectors(real, image_one_hot_labels)
---> 60 disc_fake_pred = disc(fake_image_and_labels)
61 disc_real_pred = disc(real_image_and_labels)
62 #### END CODE HERE ####
63
64 # Make sure shapes are correct
File /usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
Input In [18], in Discriminator.forward(self, image)
40 def forward(self, image):
41 """
42 Function for completing a forward pass of the discriminator: Given an image tensor,
43 returns a 1-dimension tensor representing fake/real.
44 Parameters:
45 image: a flattened image tensor with dimension (im_chan)
46 """
---> 47 disc_pred = self.disc(image)
48 return disc_pred.view(len(disc_pred), -1)
File /usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /usr/local/lib/python3.8/dist-packages/torch/nn/modules/container.py:204, in Sequential.forward(self, input)
202 def forward(self, input):
203 for module in self:
-> 204 input = module(input)
205 return input
File /usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /usr/local/lib/python3.8/dist-packages/torch/nn/modules/container.py:204, in Sequential.forward(self, input)
202 def forward(self, input):
203 for module in self:
-> 204 input = module(input)
205 return input
File /usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /usr/local/lib/python3.8/dist-packages/torch/nn/modules/conv.py:463, in Conv2d.forward(self, input)
462 def forward(self, input: Tensor) -> Tensor:
-> 463 return self._conv_forward(input, self.weight, self.bias)
File /usr/local/lib/python3.8/dist-packages/torch/nn/modules/conv.py:459, in Conv2d._conv_forward(self, input, weight, bias)
455 if self.padding_mode != 'zeros':
456 return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
457 weight, bias, self.stride,
458 _pair(0), self.dilation, self.groups)
-> 459 return F.conv2d(input, weight, bias, self.stride,
460 self.padding, self.dilation, self.groups)
RuntimeError: Given groups=1, weight of size [64, 1, 4, 4], expected input[128, 11, 28, 28] to have 1 channels, but got 11 channels instead
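From the error, it looks like the first Conv2d in my discriminator was built to expect 1 input channel (weight of size [64, 1, 4, 4]), while the concatenated image-plus-labels tensor has 1 + 10 = 11 channels. My current guess (just my assumption, not the official solution) is that the discriminator has to be constructed with the combined channel count, roughly like this self-contained check:

import torch
from torch import nn

# mnist_shape and n_classes mirror the notebook's names; the rest is my sketch.
mnist_shape, n_classes = (1, 28, 28), 10
disc_im_chan = mnist_shape[0] + n_classes  # 1 + 10 = 11 input channels

# First discriminator conv built with the combined channel count
first_conv = nn.Conv2d(disc_im_chan, 64, kernel_size=4, stride=2)
x = torch.randn(128, disc_im_chan, 28, 28)  # the shape from the error message
print(first_conv(x).shape)  # torch.Size([128, 64, 13, 13]); no channel mismatch

Is that the right direction, or am I missing something else?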