Mirror of https://github.com/TencentARC/GFPGAN.git (synced 2026-02-14 13:46:30 +00:00)
Compare commits
21 Commits
| SHA1 |
|---|
| 4f0562df64 |
| eadf03cac8 |
| a070b88e9e |
| c2e88f8eb8 |
| 69bcfff4ef |
| e5adc0dd06 |
| f6d3f70646 |
| ad70ce4653 |
| d7cb9f77f1 |
| 06ea21690c |
| 1d5963b2e6 |
| 7176e63809 |
| 3da90f924e |
| 250b75c364 |
| 1e1c863dae |
| a75e39e323 |
| 11c3957a8f |
| 4a7b2cc325 |
| 99eda83cce |
| a87388fd2f |
| 262ee3399f |
.github/workflows/no-response.yml (new file, +34)
@@ -0,0 +1,34 @@
name: No Response

# Modified from: https://raw.githubusercontent.com/github/docs/main/.github/workflows/no-response.yaml

# **What it does**: Closes issues that don't have enough information to be
#                   actionable.
# **Why we have it**: To remove the need for maintainers to remember to check
#                     back on issues periodically to see if contributors have
#                     responded.
# **Who does it impact**: Everyone that works on docs or docs-internal.

on:
  issue_comment:
    types: [created]

  schedule:
    # Schedule for five minutes after the hour every hour
    - cron: '5 * * * *'

jobs:
  noResponse:
    runs-on: ubuntu-latest
    steps:
      - uses: lee-dohm/no-response@v0.5.0
        with:
          token: ${{ github.token }}
          closeComment: >
            This issue has been automatically closed because there has been no response
            to our request for more information from the original author. With only the
            information that is currently in the issue, we don't have enough information
            to take action. Please reach out if you have or find the answers we need so
            that we can investigate further.
            If you still have questions, please improve your description and re-open it.
            Thanks :-)
.github/workflows/publish-pip.yml (3 changes)
@@ -22,8 +22,7 @@ jobs:
      - name: Build and install
        run: rm -rf .eggs && pip install -e .
      - name: Build for distribution
        # remove bdist_wheel for pip installation with compiling cuda extensions
        run: python setup.py sdist
        run: python setup.py sdist bdist_wheel
      - name: Publish distribution to PyPI
        uses: pypa/gh-action-pypi-publish@master
        with:
.github/workflows/pylint.yml (3 changes)
@@ -20,10 +20,11 @@ jobs:
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install flake8 yapf isort
          pip install codespell flake8 isort yapf

      - name: Lint
        run: |
          codespell
          flake8 .
          isort --check-only --diff gfpgan/ scripts/ inference_gfpgan.py setup.py
          yapf -r -d gfpgan/ scripts/ inference_gfpgan.py setup.py
.gitignore (1 change)
@@ -7,7 +7,6 @@ wandb/*
tmp/*

version.py
.vscode

# Byte-compiled / optimized / DLL files
__pycache__/
.pre-commit-config.yaml
@@ -24,6 +24,12 @@ repos:
    hooks:
      - id: yapf

  # codespell
  - repo: https://github.com/codespell-project/codespell
    rev: v2.1.0
    hooks:
      - id: codespell

  # pre-commit-hooks
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v3.2.0
.vscode/settings.json (new file, +19)
@@ -0,0 +1,19 @@
{
    "files.trimTrailingWhitespace": true,
    "editor.wordWrap": "on",
    "editor.rulers": [
        80,
        120
    ],
    "editor.renderWhitespace": "all",
    "editor.renderControlCharacters": true,
    "python.formatting.provider": "yapf",
    "python.formatting.yapfArgs": [
        "--style",
        "{BASED_ON_STYLE = pep8, BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true, SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true, COLUMN_LIMIT = 120}"
    ],
    "python.linting.flake8Enabled": true,
    "python.linting.flake8Args": [
        "max-line-length=120"
    ],
}
README.md (29 changes)
@@ -2,7 +2,8 @@
[](https://github.com/TencentARC/GFPGAN/releases)
[](https://pypi.org/project/gfpgan/)
[](https://github.com/TencentARC/GFPGAN/issues)
[](https://github.com/TencentARC/GFPGAN/issues)
[](https://github.com/TencentARC/GFPGAN/issues)
[](https://github.com/TencentARC/GFPGAN/blob/master/LICENSE)
[](https://github.com/TencentARC/GFPGAN/blob/master/.github/workflows/pylint.yml)
[](https://github.com/TencentARC/GFPGAN/blob/master/.github/workflows/publish-pip.yml)

@@ -14,10 +15,22 @@ GFPGAN aims at developing **Practical Algorithm for Real-world Face Restoration*
It leverages rich and diverse priors encapsulated in a pretrained face GAN (*e.g.*, StyleGAN2) for blind face restoration.

:triangular_flag_on_post: **Updates**

- :white_check_mark: Integrated into [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See the [Gradio Web Demo](https://huggingface.co/spaces/akhaliq/GFPGAN).
- :white_check_mark: Support enhancing non-face regions (background) with [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN).
- :white_check_mark: We provide a *clean* version of GFPGAN, which does not require CUDA extensions.
- :white_check_mark: We provide an updated model without colorizing faces.

---

If GFPGAN is helpful in your photos/projects, please help to :star: this repo or recommend it to your friends. Thanks:blush:
Other recommended projects:<br>
:arrow_forward: [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN): A practical algorithm for general image restoration<br>
:arrow_forward: [BasicSR](https://github.com/xinntao/BasicSR): An open-source image and video restoration toolbox<br>
:arrow_forward: [facexlib](https://github.com/xinntao/facexlib): A collection that provides useful face-related functions.<br>
:arrow_forward: [HandyView](https://github.com/xinntao/HandyView): A PyQt5-based image viewer that is handy for viewing and comparison.<br>

---

### :book: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior

> [[Paper](https://arxiv.org/abs/2101.04061)]   [[Project Page](https://xinntao.github.io/projects/gfpgan)]   [Demo] <br>

@@ -35,7 +48,7 @@ It leverages rich and diverse priors encapsulated in a pretrained face GAN (*e.g
- Python >= 3.7 (we recommend [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html))
- [PyTorch >= 1.7](https://pytorch.org/)
- Option: NVIDIA GPU + [CUDA](https://developer.nvidia.com/cuda-downloads)
- Option: Linux (We have not tested on Windows)
- Option: Linux

### Installation

@@ -62,6 +75,10 @@ If you want to use the original model in our paper, please see [PaperModel.

pip install -r requirements.txt
python setup.py develop

# If you want to enhance the background (non-face) regions with Real-ESRGAN,
# you also need to install the realesrgan package
pip install realesrgan
```

## :zap: Quick Inference

@@ -75,14 +92,18 @@ wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1
**Inference!**

```bash
python inference_gfpgan.py --upscale_factor 2 --test_path inputs/whole_imgs --save_root results
python inference_gfpgan.py --upscale 2 --test_path inputs/whole_imgs --save_root results
```

If you want to use the original model in our paper, please see [PaperModel.md](PaperModel.md) for installation and inference.

## :european_castle: Model Zoo

- [GFPGANCleanv1-NoCE-C2.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth): No colorization; no CUDA extensions are required. It is still in training; trained with more data and pre-processing.
- [GFPGANv1.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth): The paper model, with colorization.

You can find **more models (such as the discriminators)** here: [[Google Drive](https://drive.google.com/drive/folders/17rLiFzcUMoQuhLnptDsKolegHWwJOnHu?usp=sharing)], OR [[Tencent Cloud 腾讯微云](https://share.weiyun.com/ShYoCCoc)]

## :computer: Training

We provide the training codes for GFPGAN (used in our paper). <br>
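The Quick Inference section above drives GFPGAN through `inference_gfpgan.py`. For readers who prefer to call it from Python, a minimal sketch of the same restoration flow is given below. It uses the `GFPGANer` helper touched by the later hunks; the exact constructor arguments (`arch`, `channel_multiplier`) and the input path are assumptions based on this version of the package, not something stated in the diff.

```python
# Minimal sketch of programmatic inference with GFPGAN.
# Hedged: the exact GFPGANer signature is assumed from this version of the repo.
import cv2
from gfpgan import GFPGANer

restorer = GFPGANer(
    model_path='GFPGANCleanv1-NoCE-C2.pth',  # weights from the release URL in the Model Zoo above
    upscale=2,                               # same as `--upscale 2` in the CLI example
    arch='clean',                            # the "clean" variant that needs no CUDA extensions
    channel_multiplier=2,                    # matches `--channel 2` in inference_gfpgan.py
    bg_upsampler=None)                       # plug in Real-ESRGAN here to also enhance the background

img = cv2.imread('inputs/whole_imgs/00.jpg', cv2.IMREAD_COLOR)  # hypothetical input image
cropped_faces, restored_faces, restored_img = restorer.enhance(
    img, has_aligned=False, only_center_face=False, paste_back=True)
if restored_img is not None:
    cv2.imwrite('results/restored.png', restored_img)
```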
@@ -20,7 +20,7 @@ class StyleGAN2GeneratorSFT(StyleGAN2Generator):
            StyleGAN2. Default: 2.
        resample_kernel (list[int]): A list indicating the 1D resample kernel
            magnitude. A cross production will be applied to extent 1D resample
            kenrel to 2D resample kernel. Default: [1, 3, 3, 1].
            kernel to 2D resample kernel. Default: [1, 3, 3, 1].
        lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01.
    """

@@ -230,7 +230,7 @@ class GFPGANv1Clean(nn.Module):
            self.stylegan_decoder.load_state_dict(
                torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema'])
        if fix_decoder:
            for name, param in self.stylegan_decoder.named_parameters():
            for _, param in self.stylegan_decoder.named_parameters():
                param.requires_grad = False

        # for SFT
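The `GFPGANv1Clean` hunk above swaps `name` for `_` in the loop that freezes the pretrained StyleGAN2 decoder when `fix_decoder` is set. The freezing itself is the standard PyTorch pattern sketched below, with a toy module standing in for the decoder (the stand-in is illustrative only).

```python
# Standard PyTorch pattern used by the fix_decoder branch above: iterate over
# named_parameters() and disable gradients. The toy module is only a stand-in.
import torch.nn as nn

decoder = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 3))  # stand-in for the StyleGAN2 decoder

for _, param in decoder.named_parameters():  # the name is unused, hence `_` in the diff
    param.requires_grad = False

# Only parameters with requires_grad=True are updated by the optimizer.
trainable = [p for p in decoder.parameters() if p.requires_grad]
print(f'trainable tensors: {len(trainable)}')  # 0 once the decoder is frozen
```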
@@ -76,7 +76,8 @@ class GFPGANer():
        else:
            self.face_helper.read_image(img)
            # get face landmarks for each face
            self.face_helper.get_face_landmarks_5(only_center_face=only_center_face)
            self.face_helper.get_face_landmarks_5(only_center_face=only_center_face, eye_dist_threshold=5)
            # eye_dist_threshold=5: skip faces whose eye distance is smaller than 5 pixels
            # align and warp each face
            self.face_helper.align_warp_face()
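Per the new comment in the hunk above, `eye_dist_threshold=5` tells the facexlib face helper to skip detected faces whose inter-eye distance is under 5 pixels. The sketch below only illustrates that filtering idea with a hypothetical landmark array; it is not facexlib's actual implementation.

```python
# Rough illustration of the eye-distance filter described in the comment above.
# `landmarks5` is a hypothetical (5, 2) array of facial landmarks in the usual
# order: left eye, right eye, nose, left mouth corner, right mouth corner.
import numpy as np

def keep_face(landmarks5: np.ndarray, eye_dist_threshold: float = 5.0) -> bool:
    left_eye, right_eye = landmarks5[0], landmarks5[1]
    eye_dist = np.linalg.norm(right_eye - left_eye)
    return eye_dist >= eye_dist_threshold  # tiny faces are dropped before alignment

tiny_face = np.array([[10.0, 10.0], [13.0, 10.0], [11.5, 12.0], [10.5, 14.0], [12.5, 14.0]])
print(keep_face(tiny_face))  # False: the eye distance is only 3 px
```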
@@ -17,13 +17,19 @@ def main():
    parser.add_argument('--channel', type=int, default=2)
    parser.add_argument('--model_path', type=str, default='experiments/pretrained_models/GFPGANCleanv1-NoCE-C2.pth')
    parser.add_argument('--bg_upsampler', type=str, default='realesrgan')
    parser.add_argument('--bg_tile', type=int, default=0)
    parser.add_argument('--bg_tile', type=int, default=400)
    parser.add_argument('--test_path', type=str, default='inputs/whole_imgs')
    parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces')
    parser.add_argument('--only_center_face', action='store_true')
    parser.add_argument('--aligned', action='store_true')
    parser.add_argument('--paste_back', action='store_false')
    parser.add_argument('--save_root', type=str, default='results')
    parser.add_argument(
        '--ext',
        type=str,
        default='auto',
        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
    args = parser.parse_args()

    args = parser.parse_args()
    if args.test_path.endswith('/'):
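One detail worth noting in the argument list above: `--aligned` and `--only_center_face` use `action='store_true'`, so they are off unless passed, while `--paste_back` uses `action='store_false'`, so pasting restored faces back into the image is on by default and the flag turns it off. A quick standard-library check of that behavior:

```python
# Quick check of the store_true / store_false defaults used by the parser above.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--aligned', action='store_true')      # default False
parser.add_argument('--paste_back', action='store_false')  # default True

print(parser.parse_args([]))                # Namespace(aligned=False, paste_back=True)
print(parser.parse_args(['--paste_back']))  # Namespace(aligned=False, paste_back=False)
```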
@@ -71,7 +77,7 @@ def main():
        for idx, (cropped_face, restored_face) in enumerate(zip(cropped_faces, restored_faces)):
            # save cropped face
            save_crop_path = os.path.join(args.save_root, 'cropped_faces', f'{basename}_{idx:02d}.png')
            imwrite(restored_face, save_crop_path)
            imwrite(cropped_face, save_crop_path)
            # save restored face
            if args.suffix is not None:
                save_face_name = f'{basename}_{idx:02d}_{args.suffix}.png'

@@ -82,12 +88,20 @@ def main():
            # save cmp image
            cmp_img = np.concatenate((cropped_face, restored_face), axis=1)
            imwrite(cmp_img, os.path.join(args.save_root, 'cmp', f'{basename}_{idx:02d}.png'))

        # save restored img
        if args.suffix is not None:
            save_restore_path = os.path.join(args.save_root, 'restored_imgs', f'{basename}_{args.suffix}{ext}')
        else:
            save_restore_path = os.path.join(args.save_root, 'restored_imgs', img_name)
        imwrite(restored_img, save_restore_path)
        if restored_img is not None:
            if args.ext == 'auto':
                extension = ext[1:]
            else:
                extension = args.ext

            if args.suffix is not None:
                save_restore_path = os.path.join(args.save_root, 'restored_imgs',
                                                 f'{basename}_{args.suffix}.{extension}')
            else:
                save_restore_path = os.path.join(args.save_root, 'restored_imgs', f'{basename}.{extension}')
            imwrite(restored_img, save_restore_path)

    print(f'Results are in the [{args.save_root}] folder.')
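The new saving branch above derives the output extension from `--ext` (reusing the input's own extension when it is `auto`) and appends `--suffix` when given. A compact, hypothetical helper that captures the same path rules:

```python
# Hypothetical helper mirroring the save-path rules in the hunk above:
# --ext auto keeps the input extension, otherwise the requested one is used,
# and an optional suffix is appended to the basename.
import os

def restored_img_path(save_root: str, img_name: str, ext_arg: str = 'auto', suffix: str = None) -> str:
    basename, ext = os.path.splitext(os.path.basename(img_name))
    extension = ext[1:] if ext_arg == 'auto' else ext_arg
    name = f'{basename}_{suffix}.{extension}' if suffix is not None else f'{basename}.{extension}'
    return os.path.join(save_root, 'restored_imgs', name)

print(restored_img_path('results', 'inputs/whole_imgs/00.jpg'))            # results/restored_imgs/00.jpg
print(restored_img_path('results', '00.jpg', ext_arg='png', suffix='v1'))  # results/restored_imgs/00_v1.png
```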
requirements.txt
@@ -1,10 +1,12 @@
facexlib
lmdb
numpy
torch>=1.7
numpy<1.21  # numba requires numpy<1.21,>=1.17
opencv-python
torchvision
scipy
tqdm
basicsr>=1.3.4.0
facexlib>=0.2.0.3
lmdb
pyyaml
tb-nightly
torch>=1.7
torchvision
tqdm
yapf
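The `numpy<1.21` entry above is pinned only because numba, per the inline comment, requires `numpy<1.21,>=1.17`. A tiny sketch that checks an installed numpy against that range; it assumes the third-party `packaging` module is available, which is not listed in this requirements file.

```python
# Sketch: check the installed numpy against the range implied by the comment in
# requirements.txt (numba requires numpy<1.21,>=1.17). Assumes `packaging` is installed.
import numpy as np
from packaging.specifiers import SpecifierSet

ok = np.__version__ in SpecifierSet('>=1.17,<1.21')
print(np.__version__, 'satisfies the pin' if ok else 'is outside the pinned range')
```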
setup.cfg
@@ -20,3 +20,8 @@ known_first_party = gfpgan
known_third_party = basicsr,cv2,facexlib,numpy,torch,torchvision,tqdm
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY

[codespell]
skip = .git,./docs/build
count =
quiet-level = 3