null checks

commit eb8bd145c9
parent 2b65077d82
@@ -269,7 +269,12 @@ def convert_model_diffusion(conversion: ConversionContext, model):
     model_format = source_format(model)
 
     pipeline = model.get("pipeline", "txt2img")
+    logger.trace("converting diffusion model using pipeline %s", pipeline)
+
     converter = model_converters.get(pipeline)
+    if converter is None:
+        raise ValueError("cannot find converter for pipeline")
+
     converted, dest = converter(
         conversion,
         model,
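For context on the new guard: dict.get returns None for a key that is not registered, so without the check the call on the next line would fail with the much less helpful TypeError: 'NoneType' object is not callable. A minimal sketch with a hypothetical converter table:

model_converters = {"txt2img": lambda conversion, model: ("converted", "dest")}  # hypothetical stub table

converter = model_converters.get("unknown-pipeline")  # dict.get returns None for unknown keys
if converter is None:
    raise ValueError("cannot find converter for pipeline")  # clearer than TypeError: 'NoneType' object is not callable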
@@ -500,7 +505,7 @@ def register_plugins(conversion: ConversionContext):
     logger.info("loading conversion plugins")
     exports = load_plugins(conversion)
 
-    for proto, client in exports.clients:
+    for proto, client in exports.clients.items():
         try:
             add_model_source(proto, client)
         except Exception:
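Assuming exports.clients is a plain dict mapping protocol names to client callables, iterating it directly yields only the keys, so the two-target unpacking would try to split each key string and raise ValueError; .items() yields the (proto, client) pairs the loop body expects. A small sketch with made-up entries:

clients = {"civitai": "https://civitai.com", "huggingface": "https://huggingface.co"}  # hypothetical entries

for proto in clients:
    pass  # yields only "civitai", "huggingface"

for proto, client in clients.items():
    print(proto, client)  # yields the (key, value) pairs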
@@ -198,27 +198,29 @@ def expand_prompt(
         negative_prompt_embeds = self.text_encoder(
             input_ids=uncond_input.input_ids.astype(np.int32)
         )[0]
-        negative_padding = tokens.input_ids.shape[1] - negative_prompt_embeds.shape[1]
-        logger.trace(
-            "padding negative prompt to match input: %s, %s, %s extra tokens",
-            tokens.input_ids.shape,
-            negative_prompt_embeds.shape,
-            negative_padding,
-        )
-        negative_prompt_embeds = np.pad(
-            negative_prompt_embeds,
-            [(0, 0), (0, negative_padding), (0, 0)],
-            mode="constant",
-            constant_values=0,
-        )
-        negative_prompt_embeds = np.repeat(
-            negative_prompt_embeds, num_images_per_prompt, axis=0
-        )
-
-        # For classifier free guidance, we need to do two forward passes.
-        # Here we concatenate the unconditional and text embeddings into a single batch
-        # to avoid doing two forward passes
-        prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
+
+        if negative_prompt_embeds is not None:
+            negative_padding = tokens.input_ids.shape[1] - negative_prompt_embeds.shape[1]
+            logger.trace(
+                "padding negative prompt to match input: %s, %s, %s extra tokens",
+                tokens.input_ids.shape,
+                negative_prompt_embeds.shape,
+                negative_padding,
+            )
+            negative_prompt_embeds = np.pad(
+                negative_prompt_embeds,
+                [(0, 0), (0, negative_padding), (0, 0)],
+                mode="constant",
+                constant_values=0,
+            )
+            negative_prompt_embeds = np.repeat(
+                negative_prompt_embeds, num_images_per_prompt, axis=0
+            )
+
+            # For classifier free guidance, we need to do two forward passes.
+            # Here we concatenate the unconditional and text embeddings into a single batch
+            # to avoid doing two forward passes
+            prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
 
     logger.trace("expanded prompt shape: %s", prompt_embeds.shape)
     return prompt_embeds
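The padding block is now skipped entirely when the text encoder produced no negative embeddings. For the shapes involved: np.pad takes one (before, after) pair per axis, so [(0, 0), (0, negative_padding), (0, 0)] grows only the token axis, and np.repeat then duplicates the batch axis once per requested image. A sketch with made-up sizes (77-token input, 768-wide hidden state, 2 images per prompt):

import numpy as np

negative_prompt_embeds = np.zeros((1, 60, 768))  # hypothetical (batch, tokens, hidden)
negative_padding = 77 - negative_prompt_embeds.shape[1]

negative_prompt_embeds = np.pad(
    negative_prompt_embeds,
    [(0, 0), (0, negative_padding), (0, 0)],  # (before, after) per axis: only axis 1 grows
    mode="constant",
    constant_values=0,
)
negative_prompt_embeds = np.repeat(negative_prompt_embeds, 2, axis=0)  # num_images_per_prompt = 2
print(negative_prompt_embeds.shape)  # (2, 77, 768)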
@@ -427,7 +429,7 @@ def parse_wildcards(prompt: str, seed: int, wildcards: Dict[str, List[str]]) ->
 
         wildcard = ""
         if name in wildcards:
-            wildcard = pop_random(wildcards.get(name))
+            wildcard = pop_random(wildcards[name])
         else:
             logger.warning("unknown wildcard: %s", name)
 
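Inside the `if name in wildcards:` branch, direct indexing cannot raise KeyError, and unlike .get(name) it never hands pop_random a None (the .get form also types as Optional[List[str]]). A short sketch; pop_random here is a stand-in for the real helper:

import random
from typing import Dict, List

def pop_random(options: List[str]) -> str:  # stand-in for the real helper
    return options.pop(random.randrange(len(options)))

wildcards: Dict[str, List[str]] = {"animal": ["cat", "dog"]}  # hypothetical wildcard table

name = "animal"
if name in wildcards:
    wildcard = pop_random(wildcards[name])  # indexing behind the membership check is always safe
else:
    wildcard = ""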
@@ -488,11 +490,11 @@ def parse_region_group(group: Tuple[str, ...]) -> Region:
    top, left, bottom, right, weight, feather, prompt = group

    # break down the feather section
-    feather_radius, *feather_edges = feather.split("_")
-    if len(feather_edges) == 0:
+    feather_radius, *feather_rest = feather.split("_")
+    if len(feather_rest) == 0:
        feather_edges = "TLBR"
    else:
-        feather_edges = "".join(feather_edges)
+        feather_edges = "".join(feather_rest)

    return (
        int(top),
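The rename keeps the starred unpacking target (always a list) separate from the feather_edges string built afterwards. A tiny sketch of the unpacking behaviour with a hypothetical feather spec:

feather = "8_T_L"  # hypothetical spec: radius followed by edge letters
feather_radius, *feather_rest = feather.split("_")  # "8", ["T", "L"]; the starred target is a list even when empty

feather_edges = "TLBR" if len(feather_rest) == 0 else "".join(feather_rest)
print(feather_radius, feather_edges)  # 8 TL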
@@ -536,7 +536,7 @@ def logger_main(pool: DevicePoolExecutor, logs: "Queue[str]"):
 
     while True:
         try:
-            msg = logs.get(pool.join_timeout / 2)
+            msg = logs.get(timeout=(pool.join_timeout / 2))
            logger.debug("received logs from worker: %s", msg)
        except Empty:
            # logger worker should not generate more logs if it doesn't have any logs
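queue.Queue.get has the signature get(block=True, timeout=None), so passing the timeout positionally binds it to block: any positive number is truthy, the call blocks forever, and the Empty handler below is never reached. The keyword form applies the intended wait. A minimal sketch with a hypothetical timeout value:

from queue import Empty, Queue

logs: "Queue[str]" = Queue()
join_timeout = 1.0  # hypothetical value

try:
    # keyword form is required; a positional number would bind to `block`
    msg = logs.get(timeout=join_timeout / 2)
except Empty:
    print("no logs within timeout")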