跳到主要内容

ComfyUI 基础工作原理

ComfyUI 整个流程如下所示

下面包含了基本的 ComfyUI 执行流程

import random
import torch
import sys

sys.path.append("../")
from nodes import (
VAEDecode,
KSamplerAdvanced,
EmptyLatentImage,
SaveImage,
CheckpointLoaderSimple,
CLIPTextEncode,
)

def main():
    """Run a minimal ComfyUI txt2img pipeline end to end.

    Loads an SDXL checkpoint, encodes positive/negative prompts, samples a
    latent with an advanced KSampler, decodes it through the VAE, and saves
    the resulting image. All work happens under ``torch.inference_mode()``
    since no gradients are needed for generation.
    """
    with torch.inference_mode():
        # Load the diffusion checkpoint. Besides the denoising model itself,
        # this node also provides the matching VAE and CLIP models
        # (returned as a tuple: [0]=model, [1]=clip, [2]=vae).
        checkpointloadersimple = CheckpointLoaderSimple()
        checkpointloadersimple_4 = checkpointloadersimple.load_checkpoint(
            ckpt_name="sd_xl_base_1.0.safetensors"
        )

        # Create an empty latent image (txt2img starting point).
        emptylatentimage = EmptyLatentImage()
        emptylatentimage_5 = emptylatentimage.generate(
            width=1024, height=1024, batch_size=1
        )

        # Encode the positive prompt with CLIP (same pattern below for the
        # negative prompt).
        cliptextencode = CLIPTextEncode()
        cliptextencode_6 = cliptextencode.encode(
            text="evening sunset scenery blue sky nature, glass bottle with a galaxy in it",
            clip=checkpointloadersimple_4[1],
        )

        cliptextencode_7 = cliptextencode.encode(
            text="text, watermark", clip=checkpointloadersimple_4[1]
        )

        ksampleradvanced = KSamplerAdvanced()
        vaedecode = VAEDecode()
        saveimage = SaveImage()

        # Run the sampling loop. This KSampler uses the "euler" sampler.
        ksampleradvanced_10 = ksampleradvanced.sample(
            add_noise="enable",
            # Random seed. randint is inclusive on both ends, so the upper
            # bound must be 2**64 - 1: the original randint(1, 2**64) could
            # return 2**64, which overflows an unsigned 64-bit seed.
            noise_seed=random.randint(0, 2**64 - 1),
            steps=25,  # 25 sampling steps
            cfg=8,  # classifier-free guidance strength toward the prompt
            sampler_name="euler",
            scheduler="normal",
            # start_at_step / end_at_step are mainly for multi-model setups,
            # e.g. model A handles steps 0-10, then model B handles 10-20.
            start_at_step=0,
            end_at_step=20,
            return_with_leftover_noise="enable",
            model=checkpointloadersimple_4[0],
            positive=cliptextencode_6[0],
            negative=cliptextencode_7[0],
            latent_image=emptylatentimage_5[0],
        )

        # Decode the latent back to pixel space through the VAE.
        vaedecode_17 = vaedecode.decode(
            samples=ksampleradvanced_10[0], vae=checkpointloadersimple_4[2]
        )

        # Save the final image to disk.
        saveimage_19 = saveimage.save_images(
            filename_prefix="ComfyUI", images=vaedecode_17[0]
        )


# Script entry point: run the pipeline only when executed directly.
if __name__ == "__main__":
main()