Paper: Block Shuffle: A Method for High-resolution Fast Style Transfer with Limited Memory (arXiv:2008.03706)
This model has been pushed to the Hub using the PyTorchModelHubMixin integration.
This is a PyTorch implementation of the paper Perceptual Losses for Real-Time Style Transfer and Super-Resolution. It applies a Picasso art style to any chosen content image; stylization takes about a second on CPU and less than a second on GPU.
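For context, PyTorchModelHubMixin only requires the model class to inherit from it alongside nn.Module; it then provides save_pretrained, from_pretrained, and push_to_hub. The snippet below is a hypothetical minimal sketch of that pattern; the actual StyleTransferNet architecture lives in style_transfer_model.py in this repo and differs from this toy network.

import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin

# Hypothetical toy network: inheriting from PyTorchModelHubMixin adds
# save_pretrained / from_pretrained / push_to_hub to any nn.Module.
class TinyStyleNet(nn.Module, PyTorchModelHubMixin):
    def __init__(self, channels: int = 32):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(3, channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, 3, kernel_size=3, padding=1),
        )

    def forward(self, x):
        return self.net(x)

# model = TinyStyleNet()
# model.push_to_hub("your-username/tiny-style-net")  # uploads weights + config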
import torch
from huggingface_hub import hf_hub_download
import sys, os
import torchvision.transforms as transforms
from PIL import Image
# Download model file
model_file = hf_hub_download(
    repo_id="hajar001/fast-neural-style-transfer",
    filename="style_transfer_model.py",
)
sys.path.insert(0, os.path.dirname(model_file))
from style_transfer_model import StyleTransferNet
transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
])
# Load model
model = StyleTransferNet.from_pretrained("hajar001/fast-neural-style-transfer")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()
with torch.no_grad():
    # Load and preprocess the content image
    test_image = Image.open("/content/content_image.jpg").convert("RGB")
    test_tensor = transform(test_image).unsqueeze(0).to(device)

    # Generate the stylized image
    stylized_tensor = model(test_tensor)

    # Undo the ImageNet normalization: Normalize(mean=-m/s, std=1/s)
    # is the inverse of Normalize(mean=m, std=s)
    denorm = transforms.Normalize(
        mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
        std=[1 / 0.229, 1 / 0.224, 1 / 0.225],
    )
    stylized_tensor = denorm(stylized_tensor[0])
    stylized_tensor = torch.clamp(stylized_tensor, 0, 1)

    # Convert to PIL and save
    stylized_img = transforms.ToPILImage()(stylized_tensor.cpu())
    stylized_img.save("/content/stylized_image.jpg")
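The latency figures above are approximate and depend on hardware. As a rough sanity check, a timing loop like the following sketch (reusing model, test_tensor, and device from the snippet above) measures a single forward pass.

import time

with torch.no_grad():
    # Warm-up run so one-time initialization does not skew the measurement
    model(test_tensor)
    if device.type == "cuda":
        torch.cuda.synchronize()
    start = time.perf_counter()
    model(test_tensor)
    if device.type == "cuda":
        torch.cuda.synchronize()
    print(f"Stylization took {time.perf_counter() - start:.3f} s on {device}")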