Alissonerdx committed
Commit f588215 · verified · 1 Parent(s): 04e44f0

Upload folder using huggingface_hub

step1750/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:015c752c1fe5353de9db74b692ab3defec36bedad08027ac4a830e07ea5bc684
+ size 12309850896
step1750/zit_fft_config.toml ADDED
@@ -0,0 +1,178 @@
+ # Output path for training runs. Each training run makes a new directory in here.
+ output_dir = '/train/outputs'
+
+ # Dataset config file.
+ dataset = '/train/zit_fft_dataset_config.toml'
+ # You can have separate eval datasets. Give them a name for Tensorboard metrics.
+ # eval_datasets = [
+ # {name = 'something', config = 'path/to/eval_dataset.toml'},
+ # ]
+
+ # training settings
+
+ # I usually set this to a really high value because I don't know how long I want to train.
+ epochs = 1000
+ # Maximum number of steps to train.
+ #max_steps = 5000
+ # Batch size of a single forward/backward pass for one GPU.
+ # Can also do per-resolution batch sizes, like this: micro_batch_size_per_gpu = [[512, 4], [1024, 1]]
+ micro_batch_size_per_gpu = 8
+ # For mixed video / image training, you can have a different batch size for images.
+ #image_micro_batch_size_per_gpu = 4
+ # Pipeline parallelism degree. A single instance of the model is divided across this many GPUs.
+ pipeline_stages = 1
+ # Number of micro-batches sent through the pipeline for each training step.
+ # If pipeline_stages > 1, a higher GAS means better GPU utilization due to smaller pipeline bubbles (where GPUs aren't overlapping computation).
+ gradient_accumulation_steps = 1
+ # Grad norm clipping.
+ gradient_clipping = 1.0
+ # Learning rate warmup.
+ warmup_steps = 100
+ # Force the learning rate to be this value, regardless of what the optimizer or anything else says.
+ # Can be used to change learning rate even when resuming from checkpoint.
+ force_constant_lr = 1e-4
+ # Can be 'constant' or 'linear'. If unset, will default to 'constant', i.e. no LR scheduler.
+ #lr_scheduler = 'linear'
+
+ # Block swapping is supported for Wan, HunyuanVideo, Flux, and Chroma. This value controls the number
+ # of blocks kept offloaded to RAM. Increasing it lowers VRAM use, but has a performance penalty. The
+ # exact performance penalty depends on the model and the type of training you are doing (e.g. images vs video).
+ # Block swapping only works for LoRA training, and requires pipeline_stages=1.
+ #blocks_to_swap = 20
+
+ # Use pseudo Huber loss with constant c. Only works on models that use the default loss function.
+ #pseudo_huber_c = 0.5
+
+ # eval settings
+
+ eval_every_n_epochs = 1
+ # You can also specify eval frequency using either of these.
+ #eval_every_n_steps = 100
+ #eval_every_n_examples = 1000
+ eval_before_first_step = true
+ # Might want to set these lower for eval so that fewer images get dropped (eval dataset size is usually much smaller than training set).
+ # Each size bucket of images/videos is rounded down to the nearest multiple of the global batch size, so higher global batch size means
+ # more dropped images. Usually doesn't matter for training but the eval set is much smaller so it can matter.
+ eval_micro_batch_size_per_gpu = 1
+ # Batch size for images when doing mixed image / video training. Will be micro_batch_size_per_gpu if not set.
+ #image_eval_micro_batch_size_per_gpu = 4
+ eval_gradient_accumulation_steps = 1
+ # If using block swap, you can disable it for eval. Eval uses less memory, so depending on block swapping amount you can maybe get away with
+ # doing this, and then eval is much faster.
+ #disable_block_swap_for_eval = true
+
+ # misc settings
+
+ # Probably want to set this a bit higher if you have a smaller dataset so you don't end up with a million saved models.
+ save_every_n_epochs = 1
+ # You can also specify save frequency using either of these.
+ save_every_n_steps = 250
+ #save_every_n_examples = 1000
+ # Can checkpoint the training state every n epochs or minutes. Set only one of these. You can resume from checkpoints using the --resume_from_checkpoint flag.
+ #checkpoint_every_n_epochs = 1
+ checkpoint_every_n_minutes = 120
+ # Always set to true unless you have a huge amount of VRAM.
+ # This can also be 'unsloth' to reduce VRAM even more, with a slight performance hit.
+ activation_checkpointing = true
+ # Use reentrant activation checkpointing method (set this in addition to `activation_checkpointing`). Might be required for some models
+ # when using pipeline parallelism (pipeline_stages>1). Otherwise recommended to not use it.
+ #reentrant_activation_checkpointing = true
+
+ # Controls how Deepspeed decides how to divide layers across GPUs. Probably don't change this.
+ partition_method = 'parameters'
+ # Alternatively you can use 'manual' in combination with partition_split, which specifies the split points for dividing
+ # layers between GPUs. For example, with two GPUs, partition_split=[10] puts layers 0-9 on GPU 0, and the rest on GPU 1.
+ # With three GPUs, partition_split=[10, 20] puts layers 0-9 on GPU 0, layers 10-19 on GPU 1, and the rest on GPU 2.
+ # Length of partition_split must be pipeline_stages-1.
+ #partition_split = [N]
+
+ # dtype for saving the LoRA or model, if different from training dtype
+ save_dtype = 'bfloat16'
+ # Batch size for caching latents and text embeddings. Increasing can lead to higher GPU utilization during caching phase but uses more memory.
+ caching_batch_size = 1
+
+ # Number of parallel processes to use in map() calls when caching the dataset. Defaults to min(8, num_cpu_cores) if unset.
+ # If you have a lot of cores and multiple GPUs, raising this can increase throughput of caching, but it may use more memory,
+ # especially for video data.
+ #map_num_proc = 32
+
+ # Use torch.compile on the model. Can speed up training throughput by a decent amount. Not tested on all models.
+ #compile = true
+
+ # How often deepspeed logs to console.
+ steps_per_print = 1
+ # How to extract video clips for training from a single input video file.
+ # The video file is first assigned to one of the configured frame buckets, but then we must extract one or more clips of exactly the right
+ # number of frames for that bucket.
+ # single_beginning: one clip starting at the beginning of the video
+ # single_middle: one clip from the middle of the video (cutting off the start and end equally)
+ # default is single_beginning
+ video_clip_mode = 'single_beginning'
+
+ # By default, the loss graphs in Tensorboard / WandB have step as the x-axis. You can change it to number of examples seen instead.
+ #x_axis_examples = true
+
+ [model]
+ type = 'z_image'
+ diffusion_model = '/train/models/z_image_turbo_bf16.safetensors'
+ vae = '/train/models/ae.safetensors'
+ text_encoders = [
+ {path = '/train/models/qwen_3_4b.safetensors', type = 'lumina2'}
+ ]
+ # Use if training Z-Image-Turbo
+ merge_adapters = ['/train/models/zimage_turbo_training_adapter_v2.safetensors']
+ dtype = 'bfloat16'
+ shift = 3.0
+ # You can reduce VRAM by having most weights in fp8 (small quality loss).
+ #diffusion_model_dtype = 'float8'
+
+ # For models that support full fine tuning, simply delete or comment out the [adapter] table to FFT.
+ #[adapter]
+ #type = 'lora'
+ #rank = 32
+ # Dtype for the LoRA weights you are training.
+ #dtype = 'bfloat16'
+ # You can initialize the lora weights from a previously trained lora.
+ #init_from_existing = '/data/diffusion_pipe_training_runs/something/epoch50'
+ # Experimental. Can fuse LoRAs into the base weights before training. Right now only for Flux.
+ #fuse_adapters = [
+ # {path = '/data2/imagegen_models/loras/some_lora.safetensors', weight = 1.0}
+ #]
+
+ [optimizer]
+ # AdamW from the optimi library is a good default since it automatically uses Kahan summation when training bfloat16 weights.
+ # Look at train.py for other options. You could also easily edit the file and add your own.
+ type = 'adamw_optimi'
+ lr = 4e-5
+ betas = [0.9, 0.99]
+ weight_decay = 0.01
+ eps = 1e-8
+
+ # Can use this optimizer for a bit less memory usage.
+ # [optimizer]
+ # type = 'AdamW8bitKahan'
+ # lr = 2e-5
+ # betas = [0.9, 0.99]
+ # weight_decay = 0.01
+ # stabilize = false
+
+ # Automagic optimizer from AI-Toolkit.
+ # In my experience, this gives slightly worse results than AdamW with a properly tuned LR, but you can try it.
+
+ # [optimizer]
+ # type = 'automagic'
+ # weight_decay = 0.01
+
+ # Any optimizer not explicitly supported will be dynamically loaded from the pytorch-optimizer library.
+ # [optimizer]
+ # type = 'Prodigy'
+ # lr = 1
+ # betas = [0.9, 0.99]
+ # weight_decay = 0.01
+
+ [monitoring]
+ # Set to true and fill in these fields to enable wandb
+ enable_wandb = false
+ wandb_api_key = ''
+ wandb_tracker_name = ''
+ wandb_run_name = ''
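
The training config above points to /train/zit_fft_dataset_config.toml, which is not included in this commit. As a rough orientation only, a diffusion-pipe dataset config usually looks like the sketch below; the keys mirror diffusion-pipe's example dataset config, and the resolution, bucketing values, path, and repeat count are placeholders rather than the values used for this run.

# Hypothetical dataset config sketch (not the file used for step1750).
# Resolution to train at; images are bucketed around this size.
resolutions = [1024]
# Enable aspect-ratio bucketing so non-square images keep their proportions.
enable_ar_bucket = true
min_ar = 0.5
max_ar = 2.0
num_ar_buckets = 7

[[directory]]
# Placeholder path to a folder of images with matching .txt caption files.
path = '/train/data/my_dataset'
num_repeats = 1

Each [[directory]] table points at one folder of training examples; multiple tables can be listed to mix datasets with different repeat counts.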