diff --git a/__dev_archived/_trash.txt b/__dev_archived/_trash.txt
index 1f8c111..aa31d7d 100644
--- a/__dev_archived/_trash.txt
+++ b/__dev_archived/_trash.txt
@@ -1,3 +1,194 @@
+
+# import onnx
+# import onnx.parser
+# import onnx.checker
+# from onnx import helper
+# from onnx import AttributeProto, TensorProto, GraphProto
+# import numpy as np
+# from xlib import onnxruntime as lib_ort
+# import cv2
+
+# from io import BytesIO
+
+# _str_to_type = {
+#     'float32' : TensorProto.FLOAT,
+#     'bool'    : TensorProto.BOOL,
+# }
+
+# class ONNXGraphBuilder:
+#     """
+#     onnx graph builder.
+
+#     Tensors are defined with unknown shape dimensions,
+#     because onnx does not provide shape computation
+#     for example: broadcasting [1,3,5] with [1,5], user should compute it manually and specify output shape of broadcast operation.
+#     So the best way is to define tensors only with rank.
+#     """
+
+#     def __init__(self):
+#         self._nodes = []
+#         self._name_counter = 0
+
+#     def _generate_tensor_name(self, ):
+#         result = f'T{self._name_counter}'
+
+#         self._name_counter += 1
+#         return result
+
+#     def _get_shape(self, t):
+#         if isinstance(t, onnx.ValueInfoProto):
+#             return [ x.dim_value for x in t.type.tensor_type.shape.dim ]
+#         raise Exception()
+#     def _get_ndim(self, t):
+#         if isinstance(t, onnx.ValueInfoProto):
+#             return len(t.type.tensor_type.shape.dim)
+#         raise Exception()
+
+#     def _get_data_type(self, t):
+#         if isinstance(t, onnx.ValueInfoProto):
+#             return t.type.tensor_type.elem_type
+#         raise Exception()
+
+#     def tensor(self, ndim, data_type='float32', name=None):
+#         if name is None:
+#             name = self._generate_tensor_name()
+#         data_type = _str_to_type[data_type]
+
+#         return helper.make_tensor_value_info(name, data_type, [-1]*ndim)
+
+#     def constant(self, np_value : np.ndarray, data_type='float32', name=None):
+#         output_t = self.tensor(ndim=np_value.ndim, name=name)
+
+#         if isinstance(data_type, str):
+#             data_type = _str_to_type[data_type]
+
+#         node = helper.make_node('Constant', [], [output_t.name],
+#                                 value=helper.make_tensor(name=self._generate_tensor_name(), data_type=data_type, dims=np_value.shape, vals=np_value.flatten() ) )
+
+#         self._nodes.append(node)
+#         return output_t
+
+
+#     def greater_equal(self, input_t, value_t, output_t=None):
+#         input_ndim = self._get_ndim(input_t)
+#         value_ndim = self._get_ndim(value_t)
+
+#         if output_t is None:
+#             output_t = self.tensor(ndim=max(input_ndim, value_ndim), data_type='bool')
+
+#         self._nodes.append( helper.make_node('GreaterOrEqual', [input_t.name, value_t.name], [output_t.name]) )
+#         return output_t
+
+#     def conv2d(self, input_t, kernel_t, output_t=None, auto_pad='SAME_LOWER', strides=[1,1]):
+#         if output_t is None:
+#             output_t = self.tensor(ndim=self._get_ndim(input_t))
+
+#         self._nodes.append( helper.make_node('Conv', [input_t.name, kernel_t.name], [output_t.name], auto_pad=auto_pad, strides=strides) )
+#         return output_t
+
+#     def binary_erode(self, input_t, struct_np : np.ndarray):
+#         """
+#          input_t     NCHW tensor
+
+#          struct_np   np.ndarray
+#                      HW structuring element
+#         """
+
+#         struct_t = self.constant(struct_np, data_type=self._get_data_type(input_t))
+
+#         import code
+#         code.interact(local=dict(globals(), **locals()))
+
+
+#     def make_model(self, inputs, outputs):
+#         graph_def = helper.make_graph(self._nodes, 'graph', inputs, outputs)
+
+#         model = helper.make_model(graph_def)
+#         model.ir_version=7
+#         model.opset_import[0].version=14
+#         onnx.checker.check_model(model)
+
+#         return model
+
+
+
+
+# gb = ONNXGraphBuilder()
+
+# # val1_t = gb.constant( np.ones( (4,4)) )
+# # val2_t = gb.constant( np.ones( (1,1,4)) )
+# # #val1_t = gb.tensor(ndim=2, name='I')
+# # #val2_t = gb.tensor(ndim=2, name='W')
+
+# # output_t = gb.greater_equal(val1_t, val2_t)
+
+# # model = gb.make_model([], [output_t])
+
+# # sess = lib_ort.InferenceSession_with_device(model, lib_ort.get_cpu_device() )
+
+# # #x = sess.run(None, {'I': np.ones( (4,4), np.float32), 'W': np.ones( (1,4), np.float32) })[0]
+# # x = sess.run(None, {})[0]
+# # import code
+# # code.interact(local=dict(globals(), **locals()))
+
+
+
+# img = np.ones( (5,5), np.uint8 )
+# img = np.pad(img, ( (2,2), (1,1) ))
+# img[4,4] = 0
+# img_er = cv2.dilate(img, el, iterations = 1 )
+
+# input_t = gb.tensor(ndim=4, name='I')
+# #kernel_t = gb.tensor(ndim=4, name='W')
+
+# o = gb.binary_erode(input_t, el)
+
+# # output_t = gb.conv2d(input_t, kernel_t)
+
+# # model = gb.make_model([input_t, kernel_t], [output_t])
+
+
+# # sess = lib_ort.InferenceSession_with_device(model, lib_ort.get_cpu_device() )
+
+
+
+# # x = sess.run(None, {'I': img[None,None,...].astype(np.float32), 'W': el[None,None,...].astype(np.float32) })[0]
+
+
+
+
+
+
+# import numpy as np
+# import cv2
+# el = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))#.astype(np.float32)
+
+# from xlib import tensorcl as tcl
+
+# x = tcl.get_available_devices_info()
+# #dev = tcl.get_device(x[0])
+# #tcl.set_argault_device(x[1])
+
+
+
+# #x = tcl.Tensor.from_value( np.ones( (2,2))*2)
+# tcl.test_all()
+
+# #x = tcl.Tensor( [4,2], initializer=tcl.initializer.RandomUniform() )
+
+
+
+
+
+
+
+
+
+
+
+
+
 from collections import deque
 from queue import Queue
 from typing import Any, Union, Tuple
diff --git a/__dev_archived/archived.zip b/__dev_archived/archived.zip
index 5366f9d..a8f4eb6 100644
Binary files a/__dev_archived/archived.zip and b/__dev_archived/archived.zip differ
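
The hunk above archives an experiment that expresses binary erosion with ONNX primitives; its binary_erode method stops at a code.interact placeholder. For reference, the following is a minimal sketch of the same Conv + GreaterOrEqual idea, written against stock onnx/onnxruntime rather than the repo's xlib wrappers. The tensor names, the 3x3 elliptical element, and the dynamic NCHW input shape are illustrative assumptions, not part of the archived code.

import numpy as np
import cv2
import onnx
from onnx import helper, TensorProto
import onnxruntime as ort

# 3x3 elliptical structuring element, as in the archived experiment.
el = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)).astype(np.float32)

# Structuring element as Conv weights, shape (out_channels=1, in_channels=1, H, W).
w = helper.make_tensor('W', TensorProto.FLOAT, [1, 1, *el.shape], el.flatten())
# A pixel survives erosion only if every 'on' position of the element sits on a 1,
# i.e. the correlation sum reaches the element's own sum.
thr = helper.make_tensor('thr', TensorProto.FLOAT, [], [float(el.sum())])

graph = helper.make_graph(
    [
        helper.make_node('Constant', [], ['W_t'], value=w),
        helper.make_node('Constant', [], ['thr_t'], value=thr),
        helper.make_node('Conv', ['I', 'W_t'], ['C'], auto_pad='SAME_LOWER', strides=[1, 1]),
        helper.make_node('GreaterOrEqual', ['C', 'thr_t'], ['O']),
    ],
    'binary_erode',
    [helper.make_tensor_value_info('I', TensorProto.FLOAT, [1, 1, None, None])],
    [helper.make_tensor_value_info('O', TensorProto.BOOL, [1, 1, None, None])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 14)])
onnx.checker.check_model(model)

# Same kind of toy input as the archived script: a padded block of ones.
img = np.pad(np.ones((5, 5), np.float32), ((2, 2), (1, 1)))
sess = ort.InferenceSession(model.SerializeToString(), providers=['CPUExecutionProvider'])
eroded = sess.run(None, {'I': img[None, None]})[0][0, 0]
# 'eroded' should agree with cv2.erode on the same mask, up to dtype.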