Generate personalized artwork by learning from visual preferences - no artistic skills required
"Perfection is just a few swipes away"
PLGL was pioneered at SkinDeep.ai for personalized face generation using StyleGAN. Users would rate generated faces with simple swipes, and the system would learn their preferences to generate their "perfect" face - all without any prompting or description.
The original implementation used StyleGAN's 512-dimensional latent space. Each point in this space represents a unique image:
```python
# Original SkinDeep.ai approach (simplified)
import numpy as np
from sklearn.svm import SVC

class VisualPreferenceLearner:
    def __init__(self, generator, latent_dim=512):
        self.generator = generator  # StyleGAN generator network
        self.latent_dim = latent_dim
        self.samples = []
        self.ratings = []

    def generate_image(self, z=None):
        """Generate an image from a latent code"""
        if z is None:
            z = np.random.randn(1, self.latent_dim)
        # StyleGAN generation (original TensorFlow Gs.run interface)
        image = self.generator.run(z, None,
                                   truncation_psi=0.7,
                                   randomize_noise=False)
        return image[0]
```
Users provide feedback through intuitive actions - no complex rating scales:
```python
def collect_preferences(self, n_samples=100):
    """Collect user preferences through a swipe interface"""
    for i in range(n_samples):
        # Generate diverse samples
        z = np.random.randn(1, self.latent_dim)
        image = self.generate_image(z)
        # User swipes left (dislike=0) or right (like=1)
        rating = get_user_swipe(image)
        self.samples.append(z)
        self.ratings.append(rating)
        print(f"Sample {i+1}: {'Liked' if rating else 'Disliked'}")
```
A Support Vector Machine learns the boundary between liked and disliked images in latent space:
```python
def train_preference_model(self):
    """Train an SVM classifier on the latent space"""
    X = np.array(self.samples).reshape(-1, self.latent_dim)
    y = np.array(self.ratings)
    # A linear kernel works well in high-dimensional latent spaces
    # and keeps the weights (coef_) available for the
    # reverse-classification step below
    self.classifier = SVC(kernel='linear', probability=True)
    self.classifier.fit(X, y)
    accuracy = self.classifier.score(X, y)
    print(f"Preference model training accuracy: {accuracy:.2%}")
```
The key innovation: compute the optimal latent vector directly from the trained classifier. With a linear decision function f(z) = w·z + b and a sigmoid link, a target probability p corresponds to a target logit log(p / (1 - p)); reverse classification walks through the latent dimensions, assigning each coordinate its share of that logit while keeping values in a valid range:
```python
def reverse_classify(self, target_score=0.99):
    """Find a latent vector that maximizes the preference score"""
    # Get the linear SVM's weights and bias
    weights = self.classifier.coef_[0]
    bias = self.classifier.intercept_[0]
    # Initialize output
    optimal_z = np.zeros(self.latent_dim)
    # Target logit: treats the decision function as log-odds, which
    # approximates the Platt-scaled probability sklearn reports
    x = np.log(target_score / (1 - target_score)) - bias
    # Distribute the target logit across dimensions, one at a time.
    # This is the "reverse" of classification.
    for i in np.random.permutation(self.latent_dim):
        if weights[i] == 0:
            continue  # this dimension cannot move the score
        y = x / weights[i]
        if abs(y) >= 1.0:
            # Clip to the valid range and carry the remainder forward
            optimal_z[i] = np.sign(y) * 1.0
            x -= weights[i] * optimal_z[i]
        else:
            # Final dimension absorbs what's left of the target logit
            optimal_z[i] = y
            break
    return optimal_z
```
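A quick sanity check is to push the returned vector back through the classifier and confirm the predicted preference lands near the target (a sketch, assuming a trained learner from the steps above):

```python
z_star = learner.reverse_classify(target_score=0.99)
score = learner.classifier.predict_proba(z_star.reshape(1, -1))[0, 1]
# Close to 0.99 when the linear log-odds approximation holds
print(f"Predicted preference at z*: {score:.3f}")
```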
Generate diverse samples while focusing on high-preference regions:
```python
def generate_batch(self, n_samples=64, exploitation_ratio=0.7):
    """70% exploitation (refine preferences) + 30% exploration"""
    batch = []
    n_exploit = int(n_samples * exploitation_ratio)
    n_explore = n_samples - n_exploit
    # Exploitation: rejection-sample until we have enough latents
    # from high-confidence regions
    while len(batch) < n_exploit:
        z = np.random.randn(1, self.latent_dim)
        # Only keep samples whose predicted preference is high
        score = self.classifier.predict_proba(z)[0, 1]
        if score > 0.7:
            batch.append(z)
    # Exploration: random samples for diversity
    for _ in range(n_explore):
        z = np.random.randn(1, self.latent_dim)
        batch.append(z)
    return batch
```
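Putting the pieces together, a full session might look like the sketch below; `stylegan_generator` and `get_user_swipe` stand in for your model handle and UI hook:

```python
learner = VisualPreferenceLearner(stylegan_generator)
learner.collect_preferences(n_samples=100)  # first round of swipes
learner.train_preference_model()

# Show a refined batch, collect another round of swipes, retrain
for z in learner.generate_batch(n_samples=64):
    rating = get_user_swipe(learner.generate_image(z))
    learner.samples.append(z)
    learner.ratings.append(rating)
learner.train_preference_model()

# Jump straight to the predicted favorite
z_star = learner.reverse_classify(target_score=0.99)
perfect_face = learner.generate_image(z_star.reshape(1, -1))
```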
- Generate artwork matching your aesthetic preferences without describing styles or using prompts.
- Create room designs by rating examples - the AI learns your style preferences.
- Design clothing and accessories that match personal style through simple ratings.
- Create brand logos by rating designs - no need to describe abstract concepts.
- Generate cohesive NFT collections that match collector preferences.
- Design personalized avatars through preference learning instead of sliders.
PLGL works with any generative model that has a latent space:
```python
# Example with Stable Diffusion
import torch

class StableDiffusionPLGL:
    def __init__(self, sd_pipeline):
        self.pipeline = sd_pipeline
        self.latent_dim = 4 * 64 * 64  # SD 1.x latent shape at 512x512
        # Track a current position for incremental navigation
        self.current_latent = torch.randn(1, self.latent_dim)

    def generate_from_latent(self, latent):
        """Generate an image from a latent code"""
        # Reshape the flat vector to SD's (batch, channels, h, w) format
        latent = latent.reshape(1, 4, 64, 64)
        # Decode through the VAE (undoing SD's latent scaling first)
        with torch.no_grad():
            latent = latent / self.pipeline.vae.config.scaling_factor
            image = self.pipeline.vae.decode(latent).sample
        return self.pipeline.image_processor.postprocess(image)[0]

    def navigate_latent_space(self, direction, step_size=0.1):
        """Move through latent space in a preference direction"""
        self.current_latent = self.current_latent + direction * step_size
        return self.current_latent
```
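A minimal way to exercise this wrapper, assuming the Hugging Face diffusers library and the runwayml/stable-diffusion-v1-5 checkpoint; note that decoding a raw Gaussian latent is only a starting point, with refined latents coming from the preference-guided navigation above:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

sd_plgl = StableDiffusionPLGL(pipe)
# Sample a starting latent and decode it into a rateable image
z = torch.randn(1, sd_plgl.latent_dim, dtype=torch.float16, device="cuda")
image = sd_plgl.generate_from_latent(z)  # a PIL image, ready to rate
image.save("candidate.png")
```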
Ready to build preference-based visual generation?