Commit 45c05b2
Set printing info for llava_hf to debug level
kcz358 committed May 23, 2024
1 parent 53f013e commit 45c05b2
Showing 1 changed file with 4 additions and 4 deletions.
lmms_eval/models/llava_hf.py (4 additions, 4 deletions)
@@ -209,8 +209,8 @@ def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
             labels[: len(contxt_id)] = -100
 
             if self.accelerator.is_main_process and doc_id % 100 == 0:
-                eval_logger.info(f"Prompt for doc ID {doc_id}:\n\n{formatted_contexts[0]}\n")
-                eval_logger.info(f"Prompt and continuation for doc ID {doc_id}:\n\n{formatted_continuation[0]}\n")
+                eval_logger.debug(f"Prompt for doc ID {doc_id}:\n\n{formatted_contexts[0]}\n")
+                eval_logger.debug(f"Prompt and continuation for doc ID {doc_id}:\n\n{formatted_continuation[0]}\n")
 
             with torch.inference_mode():
                 outputs = self.model(**model_inputs, labels=labels)
@@ -293,7 +293,7 @@ def _collate(x):
             text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 
             if self.accelerator.is_main_process and doc_id[0] % 100 == 0:
-                eval_logger.info(f"Prompt for doc ID {doc_id[0]}:\n\n{text}\n")
+                eval_logger.debug(f"Prompt for doc ID {doc_id[0]}:\n\n{text}\n")
 
             inputs = self._image_processor(images=visuals, text=text, return_tensors="pt").to(self._device, self.model.dtype)
@@ -329,7 +329,7 @@ def _collate(x):
             text_outputs = text_outputs.split("ASSISTANT:")[-1].strip()
 
             if self.accelerator.is_main_process and doc_id[0] % 100 == 0:
-                eval_logger.info(f"Generated text for doc ID {doc_id[0]}:\n\n{text_outputs}\n")
+                eval_logger.debug(f"Generated text for doc ID {doc_id[0]}:\n\n{text_outputs}\n")
 
             res.append(text_outputs)
             self.cache_hook.add_partial("generate_until", (context, gen_kwargs), text_outputs)
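Note on the change: the four calls above move the per-document prompt and output dumps from the info level to the debug level, so they no longer print at the default verbosity and only appear when the run is started with debug logging enabled. A minimal sketch of that behaviour using the standard library logging module (illustration only; the "lmms-eval" logger name, the sample doc_id, and the sample prompt below are assumptions, not taken from the repository):

import logging

# Sketch: why switching info() -> debug() silences per-document dumps at the
# default verbosity. Standard library logging is used purely for illustration;
# it is not necessarily the logger lmms_eval itself uses.
logging.basicConfig(level=logging.INFO)
eval_logger = logging.getLogger("lmms-eval")

doc_id = 0
text = "USER: <image>\nWhat is shown in this image? ASSISTANT:"  # hypothetical prompt

eval_logger.info(f"Prompt for doc ID {doc_id}:\n\n{text}\n")   # emitted at the default INFO threshold
eval_logger.debug(f"Prompt for doc ID {doc_id}:\n\n{text}\n")  # suppressed until DEBUG is enabled

# Opting back in to the verbose per-document output:
eval_logger.setLevel(logging.DEBUG)
eval_logger.debug(f"Prompt for doc ID {doc_id}:\n\n{text}\n")  # now emitted

With the default INFO threshold the debug() calls are dropped, which keeps evaluation logs compact; raising the logger level to DEBUG restores the per-document prompts and generations when they are needed for inspection.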
