generator.py
import ollama
import logging
from jinja2 import Environment, FileSystemLoader, select_autoescape
import stablediff
from urllib import request, parse
import json

# set the logging level
logging.basicConfig(level=logging.INFO)

# create a jinja2 environment that loads templates from the current directory
env = Environment(loader=FileSystemLoader('.'))

def generate_text_ollama(model_name, full_prompt):
    # send the prompt to the local Ollama server and return the generated
    # text along with the response timestamp
    messages = [
        {
            'role': 'user',
            'content': full_prompt,
        },
    ]
    response = ollama.chat(model=model_name, messages=messages)
    return response['message']['content'], response['created_at']

def generate_image_comfy(prompt, negative_prompt, target_file):
    # render the ComfyUI workflow template with the prompts filled in
    context = {
        "prompt": prompt,
        "negative_prompt": negative_prompt,
    }
    workflow = render_template_from_file('comfy-workflow.json.j2', context=context)
    # send the workflow to the comfy server; the /prompt endpoint expects the
    # workflow as a JSON object, so parse the rendered template before wrapping it
    p = {"prompt": json.loads(workflow)}
    data = json.dumps(p).encode('utf-8')
    # save the request payload to a json file for debugging
    with open('comfy-workflow-debug.json', 'w', newline='') as jsonfile:
        # write the json payload to the file
        jsonfile.write(data.decode('utf-8'))
    req = request.Request("http://127.0.0.1:8188/prompt", data=data)
    request.urlopen(req)

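# NOTE: generate_image_comfy() only queues the workflow; ComfyUI writes the image
# on the server side and target_file is not used there. The sketch below is one
# way the result could be fetched afterwards, assuming the standard ComfyUI
# /history and /view endpoints; the prompt_id would come from the JSON body
# returned by the /prompt POST above. Illustrative only, not part of the
# original flow.
def fetch_comfy_image(prompt_id, target_file):
    # poll the history endpoint to see whether the queued prompt has finished
    with request.urlopen(f"http://127.0.0.1:8188/history/{prompt_id}") as resp:
        history = json.loads(resp.read())
    if prompt_id not in history:
        return False  # not finished yet; caller can retry later
    # download the first image reported by any output node
    for node_output in history[prompt_id]["outputs"].values():
        for image in node_output.get("images", []):
            params = parse.urlencode({
                "filename": image["filename"],
                "subfolder": image["subfolder"],
                "type": image["type"],
            })
            with request.urlopen(f"http://127.0.0.1:8188/view?{params}") as img_resp:
                with open(target_file, "wb") as f:
                    f.write(img_resp.read())
            return True
    return False
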
def generate_image_stablediff(prompt, negative_prompt, target_file):
    # build the txt2img payload for the Stable Diffusion web UI API
    payload = {
        "prompt": prompt,  # extra networks also in prompts
        "negative_prompt": negative_prompt,
        # "seed": 1,
        "steps": 20,
        "width": 512,
        "height": 512,
        "cfg_scale": 7,
        "sampler_name": "DPM++ 2M Karras",
        "n_iter": 1,
        "batch_size": 1,

        # example args for x/y/z plot
        # "script_name": "x/y/z plot",
        # "script_args": [
        #     1,
        #     "10,20",
        #     [],
        #     0,
        #     "",
        #     [],
        #     0,
        #     "",
        #     [],
        #     True,
        #     True,
        #     False,
        #     False,
        #     0,
        #     False
        # ],

        # example args for Refiner and ControlNet
        # "alwayson_scripts": {
        #     "ControlNet": {
        #         "args": [
        #             {
        #                 "batch_images": "",
        #                 "control_mode": "Balanced",
        #                 "enabled": True,
        #                 "guidance_end": 1,
        #                 "guidance_start": 0,
        #                 "image": {
        #                     "image": encode_file_to_base64(r"B:\path\to\control\img.png"),
        #                     "mask": None  # base64, None when not needed
        #                 },
        #                 "input_mode": "simple",
        #                 "is_ui": True,
        #                 "loopback": False,
        #                 "low_vram": False,
        #                 "model": "control_v11p_sd15_canny [d14c016b]",
        #                 "module": "canny",
        #                 "output_dir": "",
        #                 "pixel_perfect": False,
        #                 "processor_res": 512,
        #                 "resize_mode": "Crop and Resize",
        #                 "threshold_a": 100,
        #                 "threshold_b": 200,
        #                 "weight": 1
        #             }
        #         ]
        #     },
        #     "Refiner": {
        #         "args": [
        #             True,
        #             "sd_xl_refiner_1.0",
        #             0.5
        #         ]
        #     }
        # },
        # "enable_hr": True,
        # "hr_upscaler": "R-ESRGAN 4x+ Anime6B",
        # "hr_scale": 2,
        # "denoising_strength": 0.5,
        # "styles": ['style 1', 'style 2'],

        "override_settings": {
            # 'sd_model_checkpoint': "sd_xl_base_1.0",  # this can be used to switch the sd model
        },
    }
    stablediff.call_txt2img_api(target_file, **payload)

def render_template_from_string(template_str, context):
    template = env.from_string(template_str)
    return template.render(context)


def render_template_from_file(template_file, context):
    template = env.get_template(template_file)
    return template.render(context)
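

# --- usage sketch (illustrative) ---------------------------------------------
# A minimal example of how these helpers could be wired together. The model
# name, prompts, and output path below are placeholders, not values taken from
# the original project.
if __name__ == "__main__":
    text, created_at = generate_text_ollama(
        'llama3',  # assumed model name; any model pulled into Ollama works
        'Describe a futuristic city skyline at dusk in one sentence.',
    )
    logging.info("generated at %s: %s", created_at, text)
    # use the generated text as the image prompt for the Stable Diffusion web UI
    generate_image_stablediff(
        prompt=text,
        negative_prompt='blurry, low quality',
        target_file='output.png',
    )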