Description
```
{
"name": "ValueError",
"message": "Expected parameter loc (Tensor of shape (225, 32)) of distribution Normal(loc: torch.Size([225, 32]), scale: torch.Size([225, 32])) to satisfy the constraint Real(), but found invalid values:
tensor([[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
...,
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan]], device='cuda:0',
grad_fn=)",
"stack": "---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[5], line 22
8 kp3d_mse_input = build_handler(dict(
9 type='Keypoint3dMSEInput',
10 keypoints3d=kps3d,
11 keypoints3d_conf=kps3d_conf,
12 keypoints3d_convention='smplx',
13 handler_key='keypoints3d_mse'))
15 kp3d_llen_input = build_handler(dict(
16 type='Keypoint3dLimbLenInput',
17 keypoints3d=kps3d,
18 keypoints3d_conf=kps3d_conf,
19 keypoints3d_convention='smplx',
20 handler_key='keypoints3d_limb_len'))
---> 22 smplifyx_output = smplifyx(input_list=[kp3d_mse_input, kp3d_llen_input])
File /wockspace/xrmocap_opti/xrmocap/model/registrant/smplify.py:268, in SMPLify.__call__(self, input_list, init_param_dict, return_verts, return_joints, return_full_pose, return_losses)
266 for i in range(self.n_epochs):
267 for stage_idx, stage_config in enumerate(self.stage_config):
--> 268 self.optimize_stage(
269 input_list=input_list,
270 optim_param=optim_param,
271 epoch_idx=i,
272 stage_idx=stage_idx,
273 **stage_config,
274 )
276 hook_kwargs = dict(input_list=input_list, optim_param=optim_param)
277 self.call_hook('after_optimize', **hook_kwargs)
File /wockspace/xrmocap_opti/xrmocap/model/registrant/smplify.py:477, in SMPLify.optimize_stage(self, input_list, optim_param, epoch_idx, stage_idx, use_shoulder_hip_only, body_weight, n_iter, ftol, **kwargs)
471 torch.nn.utils.clip_grad_norm_(
472 parameters=optim_param.values(),
473 max_norm=self.grad_clip)
475 return total_loss
--> 477 total_loss = optimizer.step(closure)
479 if iter_idx > 0 and previous_loss is not None and ftol > 0:
480 loss_rel_change = self.compute_relative_change(
481 previous_loss, total_loss.item())
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/optim/optimizer.py:140, in Optimizer._hook_for_profile.<locals>.profile_hook_step.<locals>.wrapper(*args, **kwargs)
138 profile_name = "Optimizer.step#{}.step".format(obj.__class__.__name__)
139 with torch.autograd.profiler.record_function(profile_name):
--> 140 out = func(*args, **kwargs)
141 obj._optimizer_step_code()
142 return out
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/autograd/grad_mode.py:27, in _DecoratorContextManager.__call__.<locals>.decorate_context(*args, **kwargs)
24 @functools.wraps(func)
25 def decorate_context(*args, **kwargs):
26 with self.clone():
---> 27 return func(*args, **kwargs)
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/optim/lbfgs.py:426, in LBFGS.step(self, closure)
423 def obj_func(x, t, d):
424 return self._directional_evaluate(closure, x, t, d)
--> 426 loss, flat_grad, t, ls_func_evals = _strong_wolfe(
427 obj_func, x_init, t, d, loss, flat_grad, gtd)
428 self._add_grad(t, d)
429 opt_cond = flat_grad.abs().max() <= tolerance_grad
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/optim/lbfgs.py:148, in _strong_wolfe(obj_func, x, t, d, f, g, gtd, c1, c2, tolerance_change, max_ls)
145 insuf_progress = False
147 # Evaluate new point
--> 148 f_new, g_new = obj_func(x, t, d)
149 ls_func_evals += 1
150 gtd_new = g_new.dot(d)
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/optim/lbfgs.py:424, in LBFGS.step.<locals>.obj_func(x, t, d)
423 def obj_func(x, t, d):
--> 424 return self._directional_evaluate(closure, x, t, d)
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/optim/lbfgs.py:278, in LBFGS._directional_evaluate(self, closure, x, t, d)
276 def _directional_evaluate(self, closure, x, t, d):
277 self._add_grad(t, d)
--> 278 loss = float(closure())
279 flat_grad = self._gather_flat_grad()
280 self._set_param(x)
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/autograd/grad_mode.py:27, in _DecoratorContextManager.__call__.<locals>.decorate_context(*args, **kwargs)
24 @functools.wraps(func)
25 def decorate_context(*args, **kwargs):
26 with self.clone():
---> 27 return func(*args, **kwargs)
File /wockspace/xrmocap_opti/xrmocap/model/registrant/smplify.py:451, in SMPLify.optimize_stage.<locals>.closure()
449 expanded_param.update(optim_param)
450 expanded_param['betas'] = betas_video
--> 451 loss_dict = self.evaluate(
452 input_list=input_list,
453 optim_param=expanded_param,
454 use_shoulder_hip_only=use_shoulder_hip_only,
455 body_weight=body_weight,
456 **kwargs)
458 if optimizer_key not in loss_dict.keys():
459 self.logger.error(
460 f'Individual optimizer is set for {optimizer_key}'
461 'but there is no loss calculated for this '
462 'optimizer. Please check LOSS_MAPPING and '
463 'make sure respective losses are turned on.')
File /wockspace/xrmocap_opti/xrmocap/model/registrant/smplify.py:588, in SMPLify.evaluate(self, input_list, optim_param, return_verts, return_full_pose, return_joints, use_shoulder_hip_only, body_weight, reduction_override, **kwargs)
586 body_pose = torch.clamp(body_pose, min=-10, max=10)
587 # try:
--> 588 body_pose_z= vp.encode(body_pose).mean
589 body_pose_rec = vp.decode(body_pose_z)['pose_body'].contiguous().view(-1, 63)
590 # except ValueError as e:
591 # print(f"Error during encoding/decoding: {e}")
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/vposer/vposer.py:194, in VPoserV2.encode(self, pose_body)
188 def encode(self, pose_body):
189 '''
190 :param Pin: Nx(numjoints*3)
191 :param rep_type: 'matrot'/'aa' for matrix rotations or axis-angle
192 :return:
193 '''
--> 194 return self.encoder_net(pose_body)
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/nn/modules/container.py:204, in Sequential.forward(self, input)
202 def forward(self, input):
203 for module in self:
--> 204 input = module(input)
205 return input
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/nn/modules/module.py:1194, in Module._call_impl(self, *input, **kwargs)
1190 # If we don't have any hooks, we want to skip the rest of the logic in
1191 # this function, and just call forward.
1192 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1193 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1194 return forward_call(*input, **kwargs)
1195 # Do not call functions when jit is used
1196 full_backward_hooks, non_full_backward_hooks = [], []
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/vposer/vposer.py:74, in NormalDistDecoder.forward(self, Xout)
72 logvar = self.logvar(Xout)
73 std = torch.sqrt(F.softplus(logvar) + 1e-6)  # add a small constant to avoid division by zero
---> 74 return torch.distributions.Normal(mu, std)
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/distributions/normal.py:56, in Normal.__init__(self, loc, scale, validate_args)
54 else:
55 batch_shape = self.loc.size()
---> 56 super(Normal, self).__init__(batch_shape, validate_args=validate_args)
File /opt/miniconda/envs/vis_smc_ipynb/lib/python3.10/site-packages/torch/distributions/distribution.py:56, in Distribution.__init__(self, batch_shape, event_shape, validate_args)
54 valid = constraint.check(value)
55 if not valid.all():
---> 56 raise ValueError(
57 f"Expected parameter {param} "
58 f"({type(value).name} of shape {tuple(value.shape)}) "
59 f"of distribution {repr(self)} "
60 f"to satisfy the constraint {repr(constraint)}, "
61 f"but found invalid values:
{value}"
62 )
63 super(Distribution, self).__init__()
ValueError: Expected parameter loc (Tensor of shape (225, 32)) of distribution Normal(loc: torch.Size([225, 32]), scale: torch.Size([225, 32])) to satisfy the constraint Real(), but found invalid values:
tensor([[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
...,
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan]], device='cuda:0',
grad_fn=)"
}
```
Have you run into a similar situation on your side (the VPoser encoder output turning into all-NaN values partway through the LBFGS optimization), and what would be an appropriate way to handle it?
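For reference, here is a minimal sketch of the workaround I am considering around the `vp.encode(body_pose)` call in `SMPLify.evaluate` (a hypothetical helper, not part of xrmocap; it assumes `vp` is the `VPoserV2` instance and `body_pose` the clamped (N, 63) axis-angle tensor from the traceback). It sanitizes the pose before encoding and fails fast if non-finite values remain, instead of letting `torch.distributions.Normal` raise deep inside VPoser:

```python
import torch

def safe_vposer_encode(vp, body_pose):
    """Hypothetical guard around vp.encode().

    The LBFGS strong-Wolfe line search can step the pose parameters
    into a region where they blow up to nan/inf; sanitize before
    encoding so Normal() never sees a non-finite loc/scale.
    """
    # Replace nan/inf with zeros (identity rotation in axis-angle)
    # and keep the input bounded, mirroring the existing clamp.
    body_pose = torch.nan_to_num(body_pose, nan=0.0, posinf=10.0, neginf=-10.0)
    body_pose = torch.clamp(body_pose, min=-10.0, max=10.0)

    z = vp.encode(body_pose).mean
    if not torch.isfinite(z).all():
        # Fail fast with a clear message at the call site.
        raise RuntimeError('VPoser encoder produced non-finite latents')
    return z

# To locate the op that first produces NaNs, this can be enabled once
# at startup (slow, debugging only):
# torch.autograd.set_detect_anomaly(True)
```

I have not verified the root cause yet; lowering the LBFGS step size or tightening the `grad_clip` already used in `optimize_stage` might also keep the line search out of the region where the pose explodes.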