Someone mentioned that there aren't many mecha spaceship models.
So I decided to make one.
# LoRA network settings (kohya sd-scripts style; Flux LoRA module).
[additional_network_arguments]
# UNet learning rate; the text encoder LR is 0 and network_train_unet_only
# is set below, so only the UNet is trained.
unet_lr = 0.0005
text_encoder_lr = 0
# Rank 128 with alpha 16 — high rank presumably chosen for detail capacity;
# NOTE(review): this also makes the output file large — confirm intent.
network_dim = 128
network_alpha = 16
network_module = "networks.lora_flux"
network_train_unet_only = true
# NOTE: the large file is generally due to the above (network_dim = 128),
# as far as I can tell.
# Optimizer settings. Adafactor is run in fixed-LR mode here
# (relative_step=False), so learning_rate below is used directly.
[optimizer_arguments]
learning_rate = 0.0005
lr_scheduler = "cosine_with_restarts"
lr_scheduler_num_cycles = 3
lr_warmup_steps = 0
optimizer_type = "Adafactor"
optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False",]
[training_arguments]
# max_train_steps = 0 presumably defers to max_train_epochs — TODO confirm
# against the trainer's documentation.
max_train_steps = 0
max_train_epochs = 31
save_every_n_epochs = 1
sample_every_n_epochs = 1
# Redacted placeholder paths must be quoted strings to be valid TOML.
sample_prompts = "xxxxxxxxxx"
sample_sampler = "euler_a"
train_batch_size = 2
noise_offset = 0.1
clip_skip = 2
weighted_captions = false
max_token_length = 225
lowram = false
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
save_precision = "bf16"
mixed_precision = "bf16"
output_dir = "xxxxxxxxxx"
logging_dir = "xxxxxxxxxx"
output_name = "sdf-1_proto_2025_02_01"
save_state = false
# Attention backend: SDPA enabled, xformers disabled.
xformers = false
sdpa = true
no_half_vae = false
gradient_checkpointing = true
gradient_accumulation_steps = 1
highvram = true
fp8_base = true
# Flux-specific options below — timestep sampling, prediction type, guidance.
timestep_sampling = "sigmoid"
model_prediction_type = "raw"
guidance_scale = 1.0
loss_type = "l2"
# Redacted model-component paths (quoted so the file parses).
clip_l = "xxxxxxxxxx"
ae = "xxxxxxxxxx"
t5xxl = "xxxxxxxxxx"
# Advanced noise / loss-weighting options.
[advanced_training_config]
multires_noise_iterations = 6
multires_noise_discount = 0.3
# Min-SNR gamma loss weighting; 5.0 is the value commonly cited in the
# Min-SNR paper — TODO confirm it is applied for this model type.
min_snr_gamma = 5.0
v2 = false
# Output serialization format.
[saving_arguments]
save_model_as = "safetensors"
# DreamBooth-style regularization; 1.0 weights prior-class loss equally.
[dreambooth_arguments]
prior_loss_weight = 1.0
# Dataset caching. Latents and text-encoder outputs are precomputed.
[dataset_arguments]
cache_latents = true
cache_text_encoder_outputs = true
# Fixed typo: original key was "cache_text_encoder_ouutputs_to_disk",
# which the trainer would not recognize and silently ignore.
cache_text_encoder_outputs_to_disk = true
Description
Details
Available On (1 platform)
The same model is published on other platforms, where it may have additional downloads or version variants.