From 0572e6900e19885e268b76ef03e750a35d1d41ea Mon Sep 17 00:00:00 2001
From: Laurent Olivier
Date: Sun, 9 Jan 2022 21:56:13 +0100
Subject: [PATCH] Did structural work for conversion

---
 facelib/S3FDExtractor.py | 147 ++++++++++++++++++++-------------------
 1 file changed, 74 insertions(+), 73 deletions(-)

diff --git a/facelib/S3FDExtractor.py b/facelib/S3FDExtractor.py
index 0e743e3..b108633 100644
--- a/facelib/S3FDExtractor.py
+++ b/facelib/S3FDExtractor.py
@@ -3,13 +3,14 @@ from pathlib import Path
 
 import cv2
 import numpy as np
-
-from core.leras import nn
+import tensorflow
+import torch
+from torch import nn
+from torch.nn import functional as F
 
 class S3FDExtractor(object):
     def __init__(self, place_model_on_cpu=False):
-        nn.initialize(data_format="NHWC")
-        tf = nn.tf
+        # nn.initialize(data_format="NHWC")
 
         model_path = Path(__file__).parent / "S3FD.npy"
         if not model_path.exists():
@@ -21,14 +22,14 @@ class S3FDExtractor(object):
                 super().__init__(**kwargs)
 
             def build_weights(self):
-                self.weight = tf.get_variable ("weight", (1, 1, 1, self.n_channels), dtype=nn.floatx, initializer=tf.initializers.ones )
+                self.weight = torch.ones([1, 1, 1, self.n_channels], dtype=torch.float64)
 
             def get_weights(self):
                 return [self.weight]
 
             def __call__(self, inputs):
                 x = inputs
-                x = x / (tf.sqrt( tf.reduce_sum( tf.pow(x, 2), axis=-1, keepdims=True ) ) + 1e-10) * self.weight
+                x = x / (torch.sqrt(torch.sum(torch.pow(x, 2), axis=-1, keepdims=True ) ) + 1e-10) * self.weight
                 return x
 
         class S3FD(nn.ModelBase):
@@ -36,96 +37,96 @@ class S3FDExtractor(object):
                 super().__init__(name='S3FD')
 
             def on_build(self):
-                self.minus = tf.constant([104,117,123], dtype=nn.floatx )
-                self.conv1_1 = nn.Conv2D(3, 64, kernel_size=3, strides=1, padding='SAME')
-                self.conv1_2 = nn.Conv2D(64, 64, kernel_size=3, strides=1, padding='SAME')
+                self.minus = torch.Tensor([104,117,123])
+                self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding='same')
+                self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding='same')
 
-                self.conv2_1 = nn.Conv2D(64, 128, kernel_size=3, strides=1, padding='SAME')
-                self.conv2_2 = nn.Conv2D(128, 128, kernel_size=3, strides=1, padding='SAME')
+                self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding='same')
+                self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding='same')
 
-                self.conv3_1 = nn.Conv2D(128, 256, kernel_size=3, strides=1, padding='SAME')
-                self.conv3_2 = nn.Conv2D(256, 256, kernel_size=3, strides=1, padding='SAME')
-                self.conv3_3 = nn.Conv2D(256, 256, kernel_size=3, strides=1, padding='SAME')
+                self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding='same')
+                self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding='same')
+                self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding='same')
 
-                self.conv4_1 = nn.Conv2D(256, 512, kernel_size=3, strides=1, padding='SAME')
-                self.conv4_2 = nn.Conv2D(512, 512, kernel_size=3, strides=1, padding='SAME')
-                self.conv4_3 = nn.Conv2D(512, 512, kernel_size=3, strides=1, padding='SAME')
+                self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding='same')
+                self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding='same')
+                self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding='same')
 
-                self.conv5_1 = nn.Conv2D(512, 512, kernel_size=3, strides=1, padding='SAME')
-                self.conv5_2 = nn.Conv2D(512, 512, kernel_size=3, strides=1, padding='SAME')
-                self.conv5_3 = nn.Conv2D(512, 512, kernel_size=3, strides=1, padding='SAME')
+                self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding='same')
+                self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding='same')
+                self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding='same')
 
-                self.fc6 = nn.Conv2D(512, 1024, kernel_size=3, strides=1, padding=3)
-                self.fc7 = nn.Conv2D(1024, 1024, kernel_size=1, strides=1, padding='SAME')
+                self.fc6 = nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=3)
+                self.fc7 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1, padding='same')
 
-                self.conv6_1 = nn.Conv2D(1024, 256, kernel_size=1, strides=1, padding='SAME')
-                self.conv6_2 = nn.Conv2D(256, 512, kernel_size=3, strides=2, padding='SAME')
+                self.conv6_1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding='same')
+                self.conv6_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)
 
-                self.conv7_1 = nn.Conv2D(512, 128, kernel_size=1, strides=1, padding='SAME')
-                self.conv7_2 = nn.Conv2D(128, 256, kernel_size=3, strides=2, padding='SAME')
+                self.conv7_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding='same')
+                self.conv7_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
 
                 self.conv3_3_norm = L2Norm(256)
                 self.conv4_3_norm = L2Norm(512)
                 self.conv5_3_norm = L2Norm(512)
 
-                self.conv3_3_norm_mbox_conf = nn.Conv2D(256, 4, kernel_size=3, strides=1, padding='SAME')
-                self.conv3_3_norm_mbox_loc = nn.Conv2D(256, 4, kernel_size=3, strides=1, padding='SAME')
+                self.conv3_3_norm_mbox_conf = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding='same')
+                self.conv3_3_norm_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding='same')
 
-                self.conv4_3_norm_mbox_conf = nn.Conv2D(512, 2, kernel_size=3, strides=1, padding='SAME')
-                self.conv4_3_norm_mbox_loc = nn.Conv2D(512, 4, kernel_size=3, strides=1, padding='SAME')
+                self.conv4_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding='same')
+                self.conv4_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding='same')
 
-                self.conv5_3_norm_mbox_conf = nn.Conv2D(512, 2, kernel_size=3, strides=1, padding='SAME')
-                self.conv5_3_norm_mbox_loc = nn.Conv2D(512, 4, kernel_size=3, strides=1, padding='SAME')
+                self.conv5_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding='same')
+                self.conv5_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding='same')
 
-                self.fc7_mbox_conf = nn.Conv2D(1024, 2, kernel_size=3, strides=1, padding='SAME')
-                self.fc7_mbox_loc = nn.Conv2D(1024, 4, kernel_size=3, strides=1, padding='SAME')
+                self.fc7_mbox_conf = nn.Conv2d(1024, 2, kernel_size=3, stride=1, padding='same')
+                self.fc7_mbox_loc = nn.Conv2d(1024, 4, kernel_size=3, stride=1, padding='same')
 
-                self.conv6_2_mbox_conf = nn.Conv2D(512, 2, kernel_size=3, strides=1, padding='SAME')
-                self.conv6_2_mbox_loc = nn.Conv2D(512, 4, kernel_size=3, strides=1, padding='SAME')
+                self.conv6_2_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding='same')
+                self.conv6_2_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding='same')
 
-                self.conv7_2_mbox_conf = nn.Conv2D(256, 2, kernel_size=3, strides=1, padding='SAME')
-                self.conv7_2_mbox_loc = nn.Conv2D(256, 4, kernel_size=3, strides=1, padding='SAME')
+                self.conv7_2_mbox_conf = nn.Conv2d(256, 2, kernel_size=3, stride=1, padding='same')
+                self.conv7_2_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding='same')
 
             def forward(self, inp):
                 x, = inp
                 x = x - self.minus
-                x = tf.nn.relu(self.conv1_1(x))
-                x = tf.nn.relu(self.conv1_2(x))
-                x = tf.nn.max_pool(x, [1,2,2,1], [1,2,2,1], "VALID")
+                x = F.relu(self.conv1_1(x))
+                x = F.relu(self.conv1_2(x))
+                x = F.max_pool2d(x, kernel_size=2, stride=2)
 
-                x = tf.nn.relu(self.conv2_1(x))
-                x = tf.nn.relu(self.conv2_2(x))
-                x = tf.nn.max_pool(x, [1,2,2,1], [1,2,2,1], "VALID")
+                x = F.relu(self.conv2_1(x))
+                x = F.relu(self.conv2_2(x))
+                x = F.max_pool2d(x, kernel_size=2, stride=2)
 
-                x = tf.nn.relu(self.conv3_1(x))
-                x = tf.nn.relu(self.conv3_2(x))
-                x = tf.nn.relu(self.conv3_3(x))
+                x = F.relu(self.conv3_1(x))
+                x = F.relu(self.conv3_2(x))
+                x = F.relu(self.conv3_3(x))
                 f3_3 = x
-                x = tf.nn.max_pool(x, [1,2,2,1], [1,2,2,1], "VALID")
+                x = F.max_pool2d(x, kernel_size=2, stride=2)
 
-                x = tf.nn.relu(self.conv4_1(x))
-                x = tf.nn.relu(self.conv4_2(x))
-                x = tf.nn.relu(self.conv4_3(x))
+                x = F.relu(self.conv4_1(x))
+                x = F.relu(self.conv4_2(x))
+                x = F.relu(self.conv4_3(x))
                 f4_3 = x
-                x = tf.nn.max_pool(x, [1,2,2,1], [1,2,2,1], "VALID")
+                x = F.max_pool2d(x, kernel_size=2, stride=2)
 
-                x = tf.nn.relu(self.conv5_1(x))
-                x = tf.nn.relu(self.conv5_2(x))
-                x = tf.nn.relu(self.conv5_3(x))
+                x = F.relu(self.conv5_1(x))
+                x = F.relu(self.conv5_2(x))
+                x = F.relu(self.conv5_3(x))
                 f5_3 = x
-                x = tf.nn.max_pool(x, [1,2,2,1], [1,2,2,1], "VALID")
+                x = F.max_pool2d(x, kernel_size=2, stride=2)
 
-                x = tf.nn.relu(self.fc6(x))
-                x = tf.nn.relu(self.fc7(x))
+                x = F.relu(self.fc6(x))
+                x = F.relu(self.fc7(x))
                 ffc7 = x
 
-                x = tf.nn.relu(self.conv6_1(x))
-                x = tf.nn.relu(self.conv6_2(x))
+                x = F.relu(self.conv6_1(x))
+                x = F.relu(self.conv6_2(x))
                 f6_2 = x
 
-                x = tf.nn.relu(self.conv7_1(x))
-                x = tf.nn.relu(self.conv7_2(x))
+                x = F.relu(self.conv7_1(x))
+                x = F.relu(self.conv7_2(x))
                 f7_2 = x
 
                 f3_3 = self.conv3_3_norm(f3_3)
@@ -135,39 +136,39 @@ class S3FDExtractor(object):
 
                 cls1 = self.conv3_3_norm_mbox_conf(f3_3)
                 reg1 = self.conv3_3_norm_mbox_loc(f3_3)
-                cls2 = tf.nn.softmax(self.conv4_3_norm_mbox_conf(f4_3))
+                cls2 = F.softmax(self.conv4_3_norm_mbox_conf(f4_3), dim=-1)
                 reg2 = self.conv4_3_norm_mbox_loc(f4_3)
-                cls3 = tf.nn.softmax(self.conv5_3_norm_mbox_conf(f5_3))
+                cls3 = F.softmax(self.conv5_3_norm_mbox_conf(f5_3), dim=-1)
                 reg3 = self.conv5_3_norm_mbox_loc(f5_3)
-                cls4 = tf.nn.softmax(self.fc7_mbox_conf(ffc7))
+                cls4 = F.softmax(self.fc7_mbox_conf(ffc7), dim=-1)
                 reg4 = self.fc7_mbox_loc(ffc7)
-                cls5 = tf.nn.softmax(self.conv6_2_mbox_conf(f6_2))
+                cls5 = F.softmax(self.conv6_2_mbox_conf(f6_2), dim=-1)
                 reg5 = self.conv6_2_mbox_loc(f6_2)
-                cls6 = tf.nn.softmax(self.conv7_2_mbox_conf(f7_2))
+                cls6 = F.softmax(self.conv7_2_mbox_conf(f7_2), dim=-1)
                 reg6 = self.conv7_2_mbox_loc(f7_2)
 
                 # max-out background label
-                bmax = tf.maximum(tf.maximum(cls1[:,:,:,0:1], cls1[:,:,:,1:2]), cls1[:,:,:,2:3])
+                bmax = torch.maximum(torch.maximum(cls1[:,:,:,0:1], cls1[:,:,:,1:2]), cls1[:,:,:,2:3])
 
-                cls1 = tf.concat ([bmax, cls1[:,:,:,3:4] ], axis=-1)
-                cls1 = tf.nn.softmax(cls1)
+                cls1 = torch.cat([bmax, cls1[:,:,:,3:4] ], axis=-1)
+                cls1 = F.softmax(cls1, dim=-1)
 
                 return [cls1, reg1, cls2, reg2, cls3, reg3, cls4, reg4, cls5, reg5, cls6, reg6]
 
         e = None
         if place_model_on_cpu:
-            e = tf.device("/CPU:0")
+            e = torch.device("cpu")
 
         if e is not None: e.__enter__()
        self.model = S3FD()
-        self.model.load_weights (model_path)
+        # self.model.load_weights(model_path)
         if e is not None: e.__exit__(None,None,None)
 
-        self.model.build_for_run ([ ( tf.float32, nn.get4Dshape (None,None,3) ) ])
+        self.model.build_for_run ([ ( torch.float32, nn.get4Dshape (None,None,3) ) ])
 
     def __enter__(self):
         return self