fix(api): move prompt parsing logs to trace level
parent d321321507
commit ed8a7c8934
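The diff below swaps every logger.debug call in expand_prompt for logger.trace. The logger object itself is configured elsewhere in the project and is not shown in this commit; as a minimal sketch, assuming a stdlib logging.Logger, a TRACE level below DEBUG could be wired up like this (the level number 5 and the _trace helper name are illustrative assumptions, not the project's actual setup):

# Hypothetical sketch only: registering a TRACE level below DEBUG with
# the stdlib logging module. The project's real logger setup is not
# part of this diff; the level number 5 and helper name are assumptions.
import logging

TRACE = 5  # DEBUG is 10, so TRACE sits one step more verbose
logging.addLevelName(TRACE, "TRACE")

def _trace(self, message, *args, **kwargs):
    # only format and emit when TRACE is enabled for this logger
    if self.isEnabledFor(TRACE):
        self._log(TRACE, message, args, **kwargs)

logging.Logger.trace = _trace  # enables logger.trace(...)

logging.basicConfig(level=TRACE)
logger = logging.getLogger(__name__)
logger.trace("splitting %s into %s groups", (1, 154), 2)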
@@ -50,7 +50,7 @@ def expand_prompt(
     )
 
     groups_count = ceil(tokens.input_ids.shape[1] / MAX_TOKENS_PER_GROUP)
-    logger.debug("splitting %s into %s groups", tokens.input_ids.shape, groups_count)
+    logger.trace("splitting %s into %s groups", tokens.input_ids.shape, groups_count)
 
     groups = []
     # np.array_split(tokens.input_ids, groups_count, axis=1)
@@ -59,19 +59,19 @@ def expand_prompt(
         group_end = min(
             group_start + MAX_TOKENS_PER_GROUP, tokens.input_ids.shape[1]
         )  # or should this be 1?
-        logger.debug("building group for token slice [%s : %s]", group_start, group_end)
+        logger.trace("building group for token slice [%s : %s]", group_start, group_end)
         groups.append(tokens.input_ids[:, group_start:group_end])
 
     # encode each chunk
-    logger.debug("group token shapes: %s", [t.shape for t in groups])
+    logger.trace("group token shapes: %s", [t.shape for t in groups])
     group_embeds = []
     for group in groups:
-        logger.debug("encoding group: %s", group.shape)
+        logger.trace("encoding group: %s", group.shape)
         embeds = self.text_encoder(input_ids=group.astype(np.int32))[0]
         group_embeds.append(embeds)
 
     # concat those embeds
-    logger.debug("group embeds shape: %s", [t.shape for t in group_embeds])
+    logger.trace("group embeds shape: %s", [t.shape for t in group_embeds])
     prompt_embeds = np.concatenate(group_embeds, axis=1)
     prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
 
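Taken together, the two hunks above implement long-prompt handling: the token ids are sliced into fixed-size groups, each group is encoded separately, and the per-group embeddings are concatenated back along the token axis. A self-contained sketch of that pattern, assuming a group size of 77 (CLIP's usual limit) and a stub standing in for the ONNX text encoder:

# Standalone sketch of the group/encode/concatenate pattern from the
# hunks above. The stub encoder, MAX_TOKENS_PER_GROUP = 77, and the
# hidden size 768 are assumptions for illustration only.
from math import ceil
import numpy as np

MAX_TOKENS_PER_GROUP = 77
HIDDEN_SIZE = 768

def encode_stub(input_ids: np.ndarray) -> np.ndarray:
    # stand-in for the text encoder: (batch, tokens) -> (batch, tokens, hidden)
    return np.zeros((*input_ids.shape, HIDDEN_SIZE), dtype=np.float32)

input_ids = np.ones((1, 154), dtype=np.int64)  # a prompt longer than one group
groups_count = ceil(input_ids.shape[1] / MAX_TOKENS_PER_GROUP)

groups = []
for i in range(groups_count):
    group_start = i * MAX_TOKENS_PER_GROUP
    group_end = min(group_start + MAX_TOKENS_PER_GROUP, input_ids.shape[1])
    groups.append(input_ids[:, group_start:group_end])

group_embeds = [encode_stub(group.astype(np.int32)) for group in groups]
prompt_embeds = np.concatenate(group_embeds, axis=1)
assert prompt_embeds.shape == (1, 154, HIDDEN_SIZE)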
@@ -107,7 +107,7 @@ def expand_prompt(
             input_ids=uncond_input.input_ids.astype(np.int32)
         )[0]
         negative_padding = tokens.input_ids.shape[1] - negative_prompt_embeds.shape[1]
-        logger.debug(
+        logger.trace(
             "padding negative prompt to match input: %s, %s, %s extra tokens",
             tokens.input_ids.shape,
             negative_prompt_embeds.shape,
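This hunk only computes and logs the padding amount; the fill itself happens past the end of the hunk and is not visible here. A hedged sketch of one plausible approach, zero-padding the shorter negative embedding along the token axis with np.pad (an assumption, not necessarily what the surrounding code does):

# Hedged sketch: pad the negative prompt embedding with zeros along the
# token axis so its length matches the expanded positive prompt. The
# real fill strategy is not visible in this diff; shapes are examples.
import numpy as np

prompt_tokens = 154  # stands in for tokens.input_ids.shape[1]
negative_prompt_embeds = np.zeros((1, 77, 768), dtype=np.float32)

negative_padding = prompt_tokens - negative_prompt_embeds.shape[1]
if negative_padding > 0:
    negative_prompt_embeds = np.pad(
        negative_prompt_embeds,
        ((0, 0), (0, negative_padding), (0, 0)),  # pad only the token axis
        mode="constant",
    )
assert negative_prompt_embeds.shape[1] == prompt_tokens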
@@ -128,7 +128,7 @@ def expand_prompt(
         # to avoid doing two forward passes
         prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
 
-    logger.debug("expanded prompt shape: %s", prompt_embeds.shape)
+    logger.trace("expanded prompt shape: %s", prompt_embeds.shape)
     return prompt_embeds
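For context, the concatenation in the final hunk is the usual classifier-free guidance batching: stacking the negative and positive embeddings on the batch axis lets a single forward pass produce both predictions, as the "# to avoid doing two forward passes" comment notes. A minimal shape check, with the (1, 154, 768) shapes as assumed example values:

# Minimal shape illustration of the final concatenation; the embedding
# shapes here are assumed example values, not taken from the project.
import numpy as np

negative_prompt_embeds = np.zeros((1, 154, 768), dtype=np.float32)
prompt_embeds = np.ones((1, 154, 768), dtype=np.float32)

prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
assert prompt_embeds.shape == (2, 154, 768)  # one batch, both predictions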