Error when training RT-DETR

I was trying to reproduce the recent article "How to Train RT-DETR on a Custom Dataset with Transformers" by following the Colab notebook linked in the post, but I get the following error when I start training:

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=pytorch_dataset_train,
    eval_dataset=pytorch_dataset_valid,
    tokenizer=processor,
    data_collator=collate_fn,
    compute_metrics=eval_compute_metrics_fn,
)

trainer.train()
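
Both collate_fn and eval_compute_metrics_fn come from the notebook. For reference, the collate function there is essentially the standard one for Transformers object-detection training; I'm reproducing it from memory below, so it may differ slightly from the notebook:

import torch

def collate_fn(batch):
    # Stack the preprocessed image tensors into one batch tensor and keep the
    # COCO-style target dicts as a list, which is what the model's forward expects.
    return {
        "pixel_values": torch.stack([item["pixel_values"] for item in batch]),
        "labels": [item["labels"] for item in batch],
    }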


RuntimeError Traceback (most recent call last)
Cell In[65], line 11
1 trainer = Trainer(
2 model=model,
3 args=training_args,
(…)
8 compute_metrics=eval_compute_metrics_fn,
9 )
→ 11 trainer.train()

File ~/coco_benchmark/coco/lib/python3.10/site-packages/transformers/trainer.py:1938, in Trainer.train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
1936 hf_hub_utils.enable_progress_bars()
1937 else:
→ 1938 return inner_training_loop(
1939 args=args,
1940 resume_from_checkpoint=resume_from_checkpoint,
1941 trial=trial,
1942 ignore_keys_for_eval=ignore_keys_for_eval,
1943 )

File ~/coco_benchmark/coco/lib/python3.10/site-packages/transformers/trainer.py:2279, in Trainer._inner_training_loop(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)
2276 self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
2278 with self.accelerator.accumulate(model):
→ 2279 tr_loss_step = self.training_step(model, inputs)
2281 if (
2282 args.logging_nan_inf_filter
2283 and not is_torch_xla_available()
2284 and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
2285 ):
2286 # if loss is nan or inf simply add the average of previous logged losses
2287 tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)

File ~/coco_benchmark/coco/lib/python3.10/site-packages/transformers/trainer.py:3318, in Trainer.training_step(self, model, inputs)
3315 return loss_mb.reduce_mean().detach().to(self.args.device)
3317 with self.compute_loss_context_manager():
→ 3318 loss = self.compute_loss(model, inputs)
3320 del inputs
3321 if (
3322 self.args.torch_empty_cache_steps is not None
3323 and self.state.global_step % self.args.torch_empty_cache_steps == 0
3324 ):

File ~/coco_benchmark/coco/lib/python3.10/site-packages/transformers/trainer.py:3363, in Trainer.compute_loss(self, model, inputs, return_outputs)
3361 else:
3362 labels = None
→ 3363 outputs = model(**inputs)
3364 # Save past state if it exists
3365 # TODO: this needs to be fixed and made cleaner later.
3366 if self.args.past_index >= 0:

File ~/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/modules/module.py:1532, in Module._wrapped_call_impl(self, *args, **kwargs)
1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1531 else:
→ 1532 return self._call_impl(*args, **kwargs)

File ~/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/modules/module.py:1541, in Module._call_impl(self, *args, **kwargs)
1536 # If we don’t have any hooks, we want to skip the rest of the logic in
1537 # this function, and just call forward.
1538 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1539 or _global_backward_pre_hooks or _global_backward_hooks
1540 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1541 return forward_call(*args, **kwargs)
1543 try:
1544 result = None

File ~/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py:185, in DataParallel.forward(self, *inputs, **kwargs)
183 return self.module(*inputs[0], **module_kwargs[0])
184 replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
→ 185 outputs = self.parallel_apply(replicas, inputs, module_kwargs)
186 return self.gather(outputs, self.output_device)

File ~/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py:200, in DataParallel.parallel_apply(self, replicas, inputs, kwargs)
199 def parallel_apply(self, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any) → List[Any]:
→ 200 return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

File ~/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py:108, in parallel_apply(modules, inputs, kwargs_tup, devices)
106 output = results[i]
107 if isinstance(output, ExceptionWrapper):
→ 108 output.reraise()
109 outputs.append(output)
110 return outputs

File ~/coco_benchmark/coco/lib/python3.10/site-packages/torch/_utils.py:705, in ExceptionWrapper.reraise(self)
701 except TypeError:
702 # If the exception takes multiple arguments, don’t try to
703 # instantiate since we don’t know how to
704 raise RuntimeError(msg) from None
→ 705 raise exception

RuntimeError: Caught RuntimeError in replica 0 on device 0.
Original Traceback (most recent call last):
  File "/home/md267396/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py", line 83, in _worker
    output = module(*input, **kwargs)
  File "/home/md267396/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/md267396/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/md267396/coco_benchmark/coco/lib/python3.10/site-packages/transformers/models/rt_detr/modeling_rt_detr.py", line 2616, in forward
    outputs = self.model(
  File "/home/md267396/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/md267396/coco_benchmark/coco/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/md267396/coco_benchmark/coco/lib/python3.10/site-packages/transformers/models/rt_detr/modeling_rt_detr.py", line 1850, in forward
    reference_points_unact = torch.concat([denoising_bbox_unact, reference_points_unact], 1)
RuntimeError: Sizes of tensors must match except in dimension 1. Expected size 128 but got size 16 for tensor number 1 in the list.
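
The traceback shows the model being wrapped in torch.nn.DataParallel, so Trainer is replicating it across all visible GPUs before the size mismatch happens inside the denoising branch. As a quick check on my side (this is my own sketch, not part of the notebook), I can pin the run to a single GPU to see whether the multi-GPU replication is what triggers it:

import os

# Single-GPU check: expose only one device before CUDA is initialized,
# so that Trainer does not wrap the model in DataParallel.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"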

Hello! Our team just confirmed that the notebook works as expected when all instructions are followed. We suggest starting from a new notebook, as there may be setup issues in your current one.
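
As a first sanity check in the fresh notebook, it can help to print the library versions and the visible GPU count before training (the exact versions to match are the ones pinned in the notebook itself):

import torch
import transformers

# Confirm the fresh environment: library versions plus the number of GPUs
# Trainer will see (more than one means it will use DataParallel by default).
print(transformers.__version__, torch.__version__, torch.cuda.device_count())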
