Ver NOOB V-PRED V1 EQ
Trained with improved parameters based on the NOOB V-PRED EQ VAE experimental version: https://civarchive.com/models/1858821/noobai-v-pred-10-with-eq-vae
Ver NOOB E-PRED V1
Trained based on NOOB E-PRED V1.0. Different trigger words set to precise concepts. See about the version for training details.
Updated
Used PonyXL to train this one. Updated dataset to achieve better performance. It enhanced this concept on PonyXL and related models.
With PonyXL, many concepts can be easily generated, so I will focus on the concepts that are weak in PonyXL. Concepts like hyper_futanari that can be easily generated with the Pony model will not be updated to an XL version.
Strength 0.8 recommended.
Introduction
This is the subLyCORIS of hyperfuta | Stable Diffusion LORA | Civitai
Use this to generate futanari with a larger glans. It can be used alone, but it seems a little overbaked.
How to use it
I recommend using it at a weight of 0.6-0.7 or lower, combined with the hyperfuta LyCORIS.
Not enough tests for other tags. It seems to work well.
Training details:
The training set consists of about 130 images. They are mirrored before training. Total steps are about 5500.
Regularization = false
resolution=768
batch_size=1
epoch=10
network_dim=32
network_alpha=32
clip_skip=2
Using AdamW8bits as optimizer:
lr="1e-4"
unet_lr="1e-4"
text_encoder_lr="1e-5"
Locon parameters:
conv_dim=4
conv_alpha=4
Description
{
"LoRA_type": "LyCORIS/LoHa",
"LyCORIS_preset": "full",
"adaptive_noise_scale": 0,
"additional_parameters": "--zero_terminal_snr ",
"ae": "",
"apply_t5_attn_mask": false,
"async_upload": false,
"block_alphas": "",
"block_dims": "",
"block_lr_zero_threshold": "",
"blocks_to_swap": 0,
"bucket_no_upscale": true,
"bucket_reso_steps": 64,
"bypass_mode": false,
"cache_latents": false,
"cache_latents_to_disk": false,
"caption_dropout_every_n_epochs": 0,
"caption_dropout_rate": 0,
"caption_extension": ".txt",
"clip_g": "",
"clip_g_dropout_rate": 0,
"clip_l": "",
"clip_skip": 0,
"color_aug": false,
"constrain": 0,
"conv_alpha": 1,
"conv_block_alphas": "",
"conv_block_dims": "",
"conv_dim": 8,
"cpu_offload_checkpointing": false,
"dataset_config": "",
"debiased_estimation_loss": false,
"decompose_both": false,
"dim_from_weights": false,
"discrete_flow_shift": 3,
"dora_wd": false,
"double_blocks_to_swap": 0,
"down_lr_weight": "",
"dynamo_backend": "no",
"dynamo_mode": "default",
"dynamo_use_dynamic": false,
"dynamo_use_fullgraph": false,
"enable_all_linear": false,
"enable_bucket": true,
"epoch": 20,
"extra_accelerate_launch_args": "",
"factor": -1,
"flip_aug": true,
"flux1_cache_text_encoder_outputs": false,
"flux1_cache_text_encoder_outputs_to_disk": false,
"flux1_checkbox": false,
"fp8_base": false,
"fp8_base_unet": false,
"full_bf16": false,
"full_fp16": false,
"ggpo_beta": 0.01,
"ggpo_sigma": 0.03,
"gpu_ids": "",
"gradient_accumulation_steps": 1,
"gradient_checkpointing": true,
"guidance_scale": 3.5,
"highvram": false,
"huber_c": 0.1,
"huber_scale": 1,
"huber_schedule": "snr",
"huggingface_path_in_repo": "",
"huggingface_repo_id": "",
"huggingface_repo_type": "",
"huggingface_repo_visibility": "",
"huggingface_token": "",
"img_attn_dim": "",
"img_mlp_dim": "",
"img_mod_dim": "",
"in_dims": "",
"ip_noise_gamma": 0.1,
"ip_noise_gamma_random_strength": false,
"keep_tokens": 1,
"learning_rate": 1,
"log_config": false,
"log_tracker_config": "",
"log_tracker_name": "",
"log_with": "tensorboard",
"logging_dir": "/root/autodl-tmp/outputs",
"logit_mean": 0,
"logit_std": 1,
"loraplus_lr_ratio": 0,
"loraplus_text_encoder_lr_ratio": 0,
"loraplus_unet_lr_ratio": 0,
"loss_type": "l2",
"lowvram": false,
"lr_scheduler": "cosine",
"lr_scheduler_args": "",
"lr_scheduler_num_cycles": 1,
"lr_scheduler_power": 1,
"lr_scheduler_type": "",
"lr_warmup": 0,
"lr_warmup_steps": 0,
"main_process_port": 0,
"masked_loss": false,
"max_bucket_reso": 4096,
"max_data_loader_n_workers": 0,
"max_grad_norm": 1,
"max_resolution": "1024,1024",
"max_timestep": 1000,
"max_token_length": 225,
"max_train_epochs": 0,
"max_train_steps": 0,
"mem_eff_attn": false,
"mem_eff_save": false,
"metadata_author": "",
"metadata_description": "",
"metadata_license": "",
"metadata_tags": "",
"metadata_title": "",
"mid_lr_weight": "",
"min_bucket_reso": 1024,
"min_snr_gamma": 5,
"min_timestep": 0,
"mixed_precision": "bf16",
"mode_scale": 1.29,
"model_list": "custom",
"model_prediction_type": "sigma_scaled",
"module_dropout": 0,
"multi_gpu": false,
"multires_noise_discount": 0.1,
"multires_noise_iterations": 6,
"network_alpha": 1,
"network_dim": 16,
"network_dropout": 0.1,
"network_weights": "",
"noise_offset": 0,
"noise_offset_random_strength": false,
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
"num_machines": 1,
"num_processes": 1,
"optimizer": "prodigyplus.ProdigyPlusScheduleFree",
"optimizer_args": "\"weight_decay=0\" \"d_coef=1\" \"use_bias_correction=True\" \"betas=0.9,0.999\"",
"output_dir": "/root/autodl-tmp/outputs",
"output_name": "big_glans-NAI-VPRED1.0-V1-loha-dim16conv8alpha1-drop0.1-SNR5-EQVAE",
"persistent_data_loader_workers": false,
"pos_emb_random_crop_rate": 0,
"pretrained_model_name_or_path": "/root/kohya_ss/models/noobaiVPred10WithEQ_experimentalEQVAE.safetensors",
"prior_loss_weight": 1,
"random_crop": false,
"rank_dropout": 0,
"rank_dropout_scale": false,
"reg_data_dir": "",
"rescaled": false,
"resume": "",
"resume_from_huggingface": "",
"sample_every_n_epochs": 1,
"sample_every_n_steps": 0,
"sample_prompts": "huge glans, 1girl, large testicles, large penis, cat ears, solo, white thighhighs, nipples, futanari, erection, long hair, veiny penis, cat tail, navel, tongue out, grabbing own breast, blush, open mouth, bridal gauntlets, white elbow gloves, large breasts, looking at viewer, nude, very awa, masterpiece, best quality, good quality, absurdres, highres, newest, very aesthetic, --n worst aesthetic, worst quality, normal quality, lowres, old, early, mid, recent, ambiguous form, bad anatomy, bad fingers, bad hands, watermark, username, logo, signature, bar censor, mosaic censoring --w 1024 --h 1344 --l 5 --s 28",
"sample_sampler": "euler",
"save_clip": false,
"save_every_n_epochs": 1,
"save_every_n_steps": 0,
"save_last_n_epochs": 0,
"save_last_n_epochs_state": 0,
"save_last_n_steps": 0,
"save_last_n_steps_state": 0,
"save_model_as": "safetensors",
"save_precision": "bf16",
"save_state": false,
"save_state_on_train_end": false,
"save_state_to_huggingface": false,
"save_t5xxl": false,
"scale_v_pred_loss_like_noise_pred": false,
"scale_weight_norms": 0,
"sd3_cache_text_encoder_outputs": false,
"sd3_cache_text_encoder_outputs_to_disk": false,
"sd3_checkbox": false,
"sd3_clip_l": "",
"sd3_clip_l_dropout_rate": 0,
"sd3_disable_mmap_load_safetensors": false,
"sd3_enable_scaled_pos_embed": false,
"sd3_fused_backward_pass": false,
"sd3_t5_dropout_rate": 0,
"sd3_t5xxl": "",
"sd3_text_encoder_batch_size": 1,
"sdxl": true,
"sdxl_cache_text_encoder_outputs": false,
"sdxl_no_half_vae": true,
"seed": 31337,
"shuffle_caption": true,
"single_blocks_to_swap": 0,
"single_dim": "",
"single_mod_dim": "",
"skip_cache_check": false,
"split_mode": false,
"split_qkv": false,
"stop_text_encoder_training": 0,
"t5xxl": "",
"t5xxl_device": "",
"t5xxl_dtype": "bf16",
"t5xxl_lr": 0,
"t5xxl_max_token_length": 512,
"text_encoder_lr": 1,
"timestep_sampling": "sigma",
"train_batch_size": 5,
"train_blocks": "all",
"train_data_dir": "/root/autodl-tmp/big_glans",
"train_double_block_indices": "all",
"train_lora_ggpo": false,
"train_norm": false,
"train_on_input": true,
"train_single_block_indices": "all",
"train_t5xxl": false,
"training_comment": "",
"txt_attn_dim": "",
"txt_mlp_dim": "",
"txt_mod_dim": "",
"unet_lr": 1,
"unit": 1,
"up_lr_weight": "",
"use_cp": false,
"use_scalar": false,
"use_tucker": false,
"v2": false,
"v_parameterization": true,
"v_pred_like_loss": 0,
"vae": "",
"vae_batch_size": 0,
"wandb_api_key": "",
"wandb_run_name": "",
"weighted_captions": false,
"weighting_scheme": "logit_normal",
"xformers": "xformers"
}
FAQ
Comments (2)
Could you also create an Illustrious XL version, perhaps?
NAI-based LoRAs fit most IL models. You may try using it on an IL model directly.







