diff --git a/.gitignore b/.gitignore
index 2a76c1e..442e0ee 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,4 +12,6 @@
 !mathlib
 !models
 !nnlib
-!utils
\ No newline at end of file
+!utils
+!Dockerfile*
+!*.sh
\ No newline at end of file
diff --git a/DockerCPU.md b/DockerCPU.md
new file mode 100644
index 0000000..1ffbd8d
--- /dev/null
+++ b/DockerCPU.md
@@ -0,0 +1,132 @@
+# For Mac Users
+If you only have a **MacBook**, DeepFaceLab's **GPU** mode does not work; it does, however, work in **CPU** mode. Following the steps below will help you build the **DRE** (DeepFaceLab Runtime Environment) more easily.
+
+### 1. Open a new terminal and clone DeepFaceLab with git
+```
+$ git clone git@github.com:iperov/DeepFaceLab.git
+```
+
+### 2. Change the directory to DeepFaceLab
+```
+$ cd DeepFaceLab
+```
+
+### 3. Install Docker
+
+[Docker Desktop for Mac](https://hub.docker.com/editions/community/docker-ce-desktop-mac)
+
+### 4. Build the Docker image for DeepFaceLab
+
+```
+$ docker build -t deepfacelab-cpu -f Dockerfile.cpu .
+```
+
+### 5. Mount the DeepFaceLab volume and run the container
+
+```
+$ docker run -p 8888:8888 --hostname deepfacelab-cpu --name deepfacelab-cpu -v $PWD:/notebooks deepfacelab-cpu
+```
+
+PS: Because your current directory is `DeepFaceLab`, `-v $PWD:/notebooks` mounts the `DeepFaceLab` directory to `/notebooks` inside **Docker**.
+
+You will then see a log like the one below:
+
+```
+The Jupyter Notebook is running at:
+http://(deepfacelab-cpu or 127.0.0.1):8888/?token=your_token
+```
+
+### 6. Open a new terminal to run DeepFaceLab in /notebooks
+
+```
+$ docker exec -it deepfacelab-cpu bash
+$ ls -A
+```
+
+### 7. Use Jupyter in the deepfacelab-cpu bash
+
+```
+$ jupyter notebook list
+```
+or just open `http://127.0.0.1:8888/?token=your_token` in your browser.
+
+PS: You can run Python with Jupyter; however, we just run our code in bash, which is simpler and clearer. The **DRE** (DeepFaceLab Runtime Environment) is now almost built.
+
+### 8. Stop or kill the Docker container
+
+```
+$ docker stop deepfacelab-cpu
+$ docker kill deepfacelab-cpu
+```
+
+### 9. Start the Docker container
+
+```
+# start the docker container
+$ docker start deepfacelab-cpu
+# open a bash shell to run deepfacelab
+$ docker exec -it deepfacelab-cpu bash
+```
+
+PS: Steps 8 and 9 simply show you how to stop and start the **DRE**.
+
+### 10. Enjoy it
+
+```
+# make sure your current directory is `/notebooks`
+$ pwd
+# make sure all the `DeepFaceLab` code is in the current path `/notebooks`
+$ ls -a
+# make the script executable
+$ chmod +x cpu.sh
+# run `DeepFaceLab`
+$ ./cpu.sh
+```
+
+### Details of `DeepFaceLab`
+
+#### 1. Concepts
+
+![SRC](doc/DF_Cage_0.jpg)
+
+In our case, **Cage**'s face is the **SRC face** and **Trump**'s face is the **DST face**, and finally we get the **Result** below.
+
+![Result](doc/merged-face.jpg)
+
+So, before you run `./cpu.sh`, you should be aware of this.
+
+#### 2. Use MTCNN (mt) to extract faces
+Do not use the DLIB extractor in CPU mode.
+
+#### 3. Best practice for sorting
+1) First, delete whatever images you can from the unsorted aligned groups.
+2) Then sort by `hist`.
+
+#### 4. Use the `H64 model` to train and convert
+Only the H64 model is reasonable to train on a home CPU. You can choose another model such as **H128 (3GB+)** or **DF (5GB+)**, but that depends entirely on your CPU performance.
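+
+PS: under the hood, the `train` option of `cpu.sh` runs roughly the command below (a sketch; the paths follow the `workspace` layout the script uses). Add `--preview` only if a display is attached:
+
+```
+$ python main.py train --training-data-src-dir workspace/data_src/aligned \
+    --training-data-dst-dir workspace/data_dst/aligned \
+    --model-dir workspace/model --model H64 --cpu-only
+```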
+
+#### 5. Execute the script options one by one
+
+```
+root@deepfacelab-cpu:/notebooks# ./cpu.sh
+1) clear workspace                  7) data_dst sort by hist
+2) extract PNG from video data_src  8) train
+3) data_src extract faces           9) convert
+4) data_src sort                   10) converted to mp4
+5) extract PNG from video data_dst 11) quit
+6) data_dst extract faces
+Please enter your choice:
+```
+
+#### 6. Put all videos in the `workspace` directory
+```
+.
+├── data_dst
+├── data_src
+├── dst.mp4
+├── model
+└── src.mp4
+
+3 directories, 2 files
+```
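+
+PS: for example, assuming your clips live in `~/Movies` (the paths and file names here are just placeholders), copy them into `workspace` before running `cpu.sh`:
+
+```
+$ cp ~/Movies/src.mp4 ~/Movies/dst.mp4 workspace/
+```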
diff --git a/Dockerfile.cpu b/Dockerfile.cpu
new file mode 100644
index 0000000..dc1b8d1
--- /dev/null
+++ b/Dockerfile.cpu
@@ -0,0 +1,17 @@
+FROM tensorflow/tensorflow:latest-py3
+
+RUN apt-get update -qq -y \
+    && apt-get install -y libsm6 libxrender1 libxext-dev python3-tk \
+    && apt-get install -y ffmpeg \
+    && apt-get install -y wget \
+    && apt-get install -y vim \
+    && apt-get install -y git \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY requirements-cpu-docker.txt /opt/
+RUN pip3 install cmake
+RUN pip3 --no-cache-dir install -r /opt/requirements-cpu-docker.txt && rm /opt/requirements-cpu-docker.txt
+
+WORKDIR "/notebooks"
+CMD ["/run_jupyter.sh", "--allow-root"]
diff --git a/README.md b/README.md
index 1794088..bf29df7 100644
--- a/README.md
+++ b/README.md
@@ -176,6 +176,9 @@ Video tutorial: https://www.youtube.com/watch?v=K98nTNjXkq8
 
 Windows 10 consumes % of VRAM even if card unused for video output.
 
+### For Mac Users
+Check out [DockerCPU.md](DockerCPU.md) for more detailed instructions.
+
 ### **Problem of the year**: algorithm of overlaying neural face onto video face located in ConverterMasked.py.
 
diff --git a/cpu.sh b/cpu.sh
new file mode 100755
index 0000000..80c88e9
--- /dev/null
+++ b/cpu.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+INTERNAL_DIR=`pwd`
+WORKSPACE=$INTERNAL_DIR/workspace
+PYTHON=`which python`
+
+PS3="Please enter your choice: "
+options=("clear workspace" "extract PNG from video data_src" "data_src extract faces" "data_src sort" "extract PNG from video data_dst" "data_dst extract faces" "data_dst sort by hist" "train" "convert" "converted to mp4" "quit")
+select opt in "${options[@]}"
+do
+    case $opt in
+        "clear workspace" )
+            echo -n "Clean up workspace? [Y/n] "; read workspace_ans
+            if [ "$workspace_ans" == "Y" ] || [ "$workspace_ans" == "y" ]; then
+                rm -rf $WORKSPACE
+                mkdir -p $WORKSPACE/data_src/aligned
+                mkdir -p $WORKSPACE/data_dst/aligned
+                mkdir -p $WORKSPACE/model
+                echo "Workspace has been successfully cleaned!"
+            fi
+            ;;
+        "extract PNG from video data_src" )
+            echo -n "File name: "; read filename
+            echo -n "FPS: "; read fps
+            if [ -z "$fps" ]; then fps="25"; fi
+            ffmpeg -i $WORKSPACE/$filename -r $fps $WORKSPACE/data_src/%04d.png -loglevel error
+            ;;
+        "data_src extract faces" )
+            echo -n "Detector? [mt | manual] "; read detector
+            $PYTHON $INTERNAL_DIR/main.py extract --input-dir $WORKSPACE/data_src --output-dir $WORKSPACE/data_src/aligned --detector $detector --debug --cpu-only
+            ;;
+        "data_src sort" )
+            echo -n "Sort by? [blur | brightness | face-yaw | hue | hist | hist-blur | hist-dissim] "; read sort_method
+            $PYTHON $INTERNAL_DIR/main.py sort --input-dir $WORKSPACE/data_src/aligned --by $sort_method
+            ;;
+        "extract PNG from video data_dst" )
+            echo -n "File name: "; read filename
+            echo -n "FPS: "; read fps
+            if [ -z "$fps" ]; then fps="25"; fi
+            ffmpeg -i $WORKSPACE/$filename -r $fps $WORKSPACE/data_dst/%04d.png -loglevel error
+            ;;
+        "data_dst extract faces" )
+            echo -n "Detector? [mt | manual] "; read detector
+            $PYTHON $INTERNAL_DIR/main.py extract --input-dir $WORKSPACE/data_dst --output-dir $WORKSPACE/data_dst/aligned --detector $detector --debug --cpu-only
+            ;;
+        "data_dst sort by hist" )
+            $PYTHON $INTERNAL_DIR/main.py sort --input-dir $WORKSPACE/data_dst/aligned --by hist
+            ;;
+        "train" )
+            echo -n "Model? [ H64 (2GB+) | H128 (3GB+) | DF (5GB+) | LIAEF128 (5GB+) | LIAEF128YAW (5GB+) | MIAEF128 (5GB+) | AVATAR (4GB+) ] "; read model
+            echo -n "Show Preview? [Y/n] "; read preview
+            if [ "$preview" == "Y" ] || [ "$preview" == "y" ]; then preview="--preview"; else preview=""; fi
+            $PYTHON $INTERNAL_DIR/main.py train --training-data-src-dir $WORKSPACE/data_src/aligned --training-data-dst-dir $WORKSPACE/data_dst/aligned --model-dir $WORKSPACE/model --model $model --cpu-only $preview
+            ;;
+        "convert" )
+            echo -n "Model? [ H64 (2GB+) | H128 (3GB+) | DF (5GB+) | LIAEF128 (5GB+) | LIAEF128YAW (5GB+) | MIAEF128 (5GB+) | AVATAR (4GB+) ] "; read model
+            $PYTHON $INTERNAL_DIR/main.py convert --input-dir $WORKSPACE/data_dst --output-dir $WORKSPACE/data_dst/merged --aligned-dir $WORKSPACE/data_dst/aligned --model-dir $WORKSPACE/model --model $model --ask-for-params --cpu-only
+            ;;
+        "converted to mp4" )
+            echo -n "File name of destination video: "; read filename
+            echo -n "FPS: "; read fps
+            if [ -z "$fps" ]; then fps="25"; fi
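+            # note: -map 0:a? copies the audio track (if any) from the original
+            # destination video, while -map 1:v takes video from the merged PNG frames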
+            ffmpeg -y -i $WORKSPACE/$filename -r $fps -i "$WORKSPACE/data_dst/merged/%04d.png" -map 0:a? -map 1:v -r $fps -c:v libx264 -b:v 8M -pix_fmt yuv420p -c:a aac -strict -2 -b:a 192k -ar 48000 "$WORKSPACE/result.mp4" -loglevel error
+            ;;
+        "quit" )
+            break
+            ;;
+        *)
+            echo "Invalid choice!"
+            ;;
+    esac
+done
diff --git a/doc/merged-face.jpg b/doc/merged-face.jpg
new file mode 100644
index 0000000..27ed67f
Binary files /dev/null and b/doc/merged-face.jpg differ
diff --git a/localization/localization.py b/localization/localization.py
index 4ccd9c8..d15be0b 100644
--- a/localization/localization.py
+++ b/localization/localization.py
@@ -2,7 +2,8 @@
 import locale
 
 system_locale = locale.getdefaultlocale()[0]
-system_language = system_locale[0:2]
+# system_locale may be None
+system_language = system_locale[0:2] if system_locale is not None else "en"
 
 windows_font_name_map = {
     'en' : 'cour',
diff --git a/main.py b/main.py
index ca62958..0bc322c 100644
--- a/main.py
+++ b/main.py
@@ -80,6 +80,7 @@ if __name__ == "__main__":
             model_path=arguments.model_dir,
             model_name=arguments.model_name,
             debug = arguments.debug,
+            preview = arguments.preview,
             #**options
             batch_size = arguments.batch_size,
             write_preview_history = arguments.write_preview_history,
@@ -106,8 +107,9 @@ if __name__ == "__main__":
     train_parser.add_argument('--force-best-gpu-idx', type=int, dest="force_best_gpu_idx", default=-1, help="Force to choose this GPU idx as best(worst).")
     train_parser.add_argument('--multi-gpu', action="store_true", dest="multi_gpu", default=False, help="MultiGPU option. It will select only same best(worst) GPU models.")
     train_parser.add_argument('--force-gpu-idxs', type=str, dest="force_gpu_idxs", default=None, help="Override final GPU idxs. Example: 0,1,2.")
     train_parser.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Train on CPU.")
+    train_parser.add_argument('--preview', action="store_true", dest="preview", default=False, help="Show preview.")
 
     train_parser.set_defaults (func=process_train)
 
 def process_convert(arguments):
diff --git a/mainscripts/Trainer.py b/mainscripts/Trainer.py
index 96b4542..2c6ae83 100644
--- a/mainscripts/Trainer.py
+++ b/mainscripts/Trainer.py
@@ -277,13 +277,14 @@ def previewThread (input_queue, output_queue):
     cv2.destroyAllWindows()
 
-def main (training_data_src_dir, training_data_dst_dir, model_path, model_name, **in_options):
-    print ("Running trainer.\r\n")
+def main (training_data_src_dir, training_data_dst_dir, model_path, model_name, preview, **in_options):
+    print ("Running trainer (preview=%s).\r\n" % (preview))
 
     output_queue = queue.Queue()
     input_queue = queue.Queue()
 
     import threading
     thread = threading.Thread(target=trainerThread, args=(output_queue, input_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name), kwargs=in_options )
     thread.start()
-
-    previewThread (input_queue, output_queue)
\ No newline at end of file
+
+    if preview:
+        previewThread (input_queue, output_queue)
\ No newline at end of file
diff --git a/nnlib/devicelib.py b/nnlib/devicelib.py
index 81f0a7f..37a9261 100644
--- a/nnlib/devicelib.py
+++ b/nnlib/devicelib.py
@@ -58,7 +58,7 @@ class devicelib:
         try:
             nvmlInit()
             nvmlShutdown()
-        except e:
+        except:
             return False
         return True
 
diff --git a/requirements-cpu-docker.txt b/requirements-cpu-docker.txt
new file mode 100644
index 0000000..ba90d7f
--- /dev/null
+++ b/requirements-cpu-docker.txt
@@ -0,0 +1,9 @@
+pathlib==1.0.1
+scandir==1.6
+h5py==2.7.1
+Keras==2.2.4
+opencv-python==3.4.0.12
+scikit-image
+dlib==19.10.0
+tqdm
+git+https://www.github.com/keras-team/keras-contrib.git
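
# PS (usage sketch): the `--preview` flag added in main.py above makes the OpenCV
# preview window in Trainer.py opt-in; it needs a display, so leave it off inside
# the CPU container. On a host with a GUI it can be passed directly:
#
#   python main.py train --training-data-src-dir workspace/data_src/aligned \
#       --training-data-dst-dir workspace/data_dst/aligned \
#       --model-dir workspace/model --model H64 --cpu-only --preview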