fix #5034. Apply Vector Field Node performance proposal solution
satabol committed Oct 28, 2023
1 parent d85084d commit b8022d4
Showing 4 changed files with 165 additions and 40 deletions.
14 changes: 14 additions & 0 deletions nodes/field/vector_field_apply.py
@@ -3,6 +3,7 @@

import bpy
from bpy.props import FloatProperty, EnumProperty, BoolProperty, IntProperty, StringProperty
from datetime import datetime

from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, repeat_last_for_length, ensure_nesting_level
@@ -54,6 +55,8 @@ def process(self):
if not any(socket.is_linked for socket in self.outputs):
return

t0 = datetime.now()-datetime.now()
dt0 = datetime.now()
vertices_s = self.inputs['Vertices'].sv_get()
coeffs_s = self.inputs['Coefficient'].sv_get()
fields_s = self.inputs['Field'].sv_get()
@@ -62,6 +65,8 @@ def process(self):
vertices_s = ensure_nesting_level(vertices_s, 4)
coeffs_s = ensure_nesting_level(coeffs_s, 3)
fields_s = ensure_nesting_level(fields_s, 2, data_types=(SvVectorField,))
dt0 = datetime.now()-dt0
t0 = t0+dt0

verts_out = []
for fields, vertices_l, coeffs_l, iterations_l in zip_long_repeat(fields_s, vertices_s, coeffs_s, iterations_s):
@@ -82,17 +87,26 @@ def process(self):
vertex = (np.array(vertex) + coeff * vector).tolist()
new_verts = [vertex]
else:
t0 = datetime.now()-datetime.now()
dt0 = datetime.now()
coeffs = repeat_last_for_length(coeffs, len(vertices))
vertices = np.array(vertices)
dt0 = datetime.now()-dt0
t0 = t0+dt0

for i in range(iterations):
dt0 = datetime.now()
xs = vertices[:,0]
ys = vertices[:,1]
zs = vertices[:,2]
new_xs, new_ys, new_zs = field.evaluate_grid(xs, ys, zs)
new_vectors = np.dstack((new_xs[:], new_ys[:], new_zs[:]))
new_vectors = np.array(coeffs)[np.newaxis].T * new_vectors[0]
vertices = vertices + new_vectors
dt0 = datetime.now()-dt0
t0 = t0+dt0
new_verts = vertices if self.output_numpy else vertices.tolist()
print(f'process t0={t0}')

verts_out.append(new_verts)

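The substance of the change to vector_field_apply.py is that each iteration now evaluates the field once on whole coordinate arrays via evaluate_grid and applies the per-vertex coefficients as a column vector, instead of looping over vertices in Python. A minimal standalone sketch of that pattern, with a toy rotation field standing in for a real Sverchok SvVectorField:

import numpy as np

# Toy stand-in for SvVectorField.evaluate_grid: takes flat x/y/z arrays and
# returns displacement components of the same shape (illustrative only).
def evaluate_grid(xs, ys, zs):
    return -ys, xs, np.zeros_like(zs)

vertices = np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.5]])
coeffs = np.array([0.1, 0.5])        # one coefficient per vertex
iterations = 3

for _ in range(iterations):
    xs, ys, zs = vertices[:, 0], vertices[:, 1], vertices[:, 2]
    new_xs, new_ys, new_zs = evaluate_grid(xs, ys, zs)
    vectors = np.dstack((new_xs, new_ys, new_zs))[0]        # shape (n, 3)
    vertices = vertices + coeffs[:, np.newaxis] * vectors   # vectorized update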
26 changes: 26 additions & 0 deletions utils/curve/nurbs.py
@@ -1152,6 +1152,9 @@ def evaluate(self, t):
return np.array([0,0,0])
else:
return numerator / denominator
# numerator, denominator = self.fraction_single_v01(0, t)
# res = np.where( denominator.reshape(-1,1)==0.0, np.repeat( np.array( [[0,0,0]] ), denominator.size, axis=0), numerator/denominator[:,np.newaxis] )
# return res

def fraction(self, deriv_order, ts):
n = len(ts)
@@ -1166,6 +1169,18 @@

return numerator, denominator[np.newaxis].T

# def fraction_single_v01(self, deriv_order, t):
# p = self.degree
# k = len(self.control_points)
# ts = np.array([t]) if not hasattr(t, '__len__') else np.array(t)
# ns = np.array([self.basis.derivative(i, p, deriv_order)(ts) for i in range(k)]) # (k,)
# coeffs = ns * self.weights[np.newaxis].T # (k, )
# coeffs_t = coeffs.T
# numerator = np.transpose((np.expand_dims(coeffs, axis=2) * np.expand_dims(self.control_points, axis=1)), (1,0,2) ) # (k, n, 3)
# numerator = numerator.sum(axis=1) # (3,n)
# denominator = coeffs.sum(axis=0) # ()
# return numerator, denominator

def fraction_single(self, deriv_order, t):
p = self.degree
k = len(self.control_points)
@@ -1190,6 +1205,17 @@ def evaluate_array(self, ts):
# if (denominator == 0).any():
# print("Num:", numerator)
# print("Denom:", denominator)

# deriv_order = 0
# p = self.degree
# k = len(self.control_points)
# ts = np.array(ts)
# ns = np.array([self.basis.derivative(i, p, deriv_order)(ts) for i in range(k)]) # (k,)
# coeffs = ns * self.weights[np.newaxis].T # (k, )
# numerator = np.transpose((np.expand_dims(coeffs, axis=2) * np.expand_dims(self.control_points, axis=1)), (1,0,2) ) # (k, n, 3)
# numerator = numerator.sum(axis=1) # (3,n)
# denominator = coeffs.sum(axis=0) # ()

return nurbs_divide(numerator, denominator)

def tangent(self, t, tangent_delta=None):
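The commented-out fraction_single_v01 variant above sketches a vectorized rational evaluation: weighted basis values form a (k, n) coefficient matrix, the numerator is the coefficient-weighted sum of the control points, and zero denominators are guarded with np.where rather than a per-point branch. A rough self-contained illustration of that shape arithmetic; the basis values here are made up for the example:

import numpy as np

ns = np.array([[0.25, 0.0],
               [0.50, 0.5],
               [0.25, 0.5]])                 # basis values, shape (k, n)
weights = np.array([1.0, 2.0, 1.0])          # control point weights, shape (k,)
control_points = np.array([[0.0, 0.0, 0.0],
                           [1.0, 0.0, 0.0],
                           [1.0, 1.0, 0.0]]) # shape (k, 3)

coeffs = ns * weights[np.newaxis].T          # (k, n)
numerator = (coeffs[:, :, np.newaxis] * control_points[:, np.newaxis, :]).sum(axis=0)  # (n, 3)
denominator = coeffs.sum(axis=0)             # (n,)

# Guard zero denominators instead of dividing blindly, as in the commented-out variant.
points = np.where(denominator[:, np.newaxis] == 0.0,
                  np.zeros((denominator.size, 3)),
                  numerator / denominator[:, np.newaxis])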
5 changes: 3 additions & 2 deletions utils/field/vector.py
@@ -1044,8 +1044,9 @@ def _evaluate(self, vertices):
if self.only_2D:
return self.surface.evaluate_array(us, vs)

surf_vertices = self.surface.evaluate_array(us, vs)
spline_normals = self.surface.normal_array(us, vs)
#surf_vertices = self.surface.evaluate_array(us, vs)
#spline_normals = self.surface.normal_array(us, vs)
spline_normals, surf_vertices = self.surface.normal_array_with_source_vertices(us, vs)
zs = vertices[:,self.orient_axis].flatten()
zs = zs[np.newaxis].T
v1 = zs * spline_normals
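The change in vector.py replaces two separate surface queries (evaluate_array plus normal_array) with a single call that returns both results, since computing the normals already requires the surface points. A sketch of that one-pass idea; the toy surface below is hypothetical, and only the returned pair mirrors the new normal_array_with_source_vertices:

import numpy as np

def evaluate_array(us, vs):
    # Toy surface z = u*v, used only to illustrate the pattern.
    return np.dstack((us, vs, us * vs))[0]

def normal_array_with_source_vertices(us, vs, h=1e-3):
    points = evaluate_array(us, vs)
    du = (evaluate_array(us + h, vs) - points) / h   # finite-difference partial in u
    dv = (evaluate_array(us, vs + h) - points) / h   # finite-difference partial in v
    normals = np.cross(du, dv)
    normals /= np.linalg.norm(normals, axis=1, keepdims=True)
    return normals, points                           # normals plus the points already computed

spline_normals, surf_vertices = normal_array_with_source_vertices(np.array([0.1, 0.5]),
                                                                  np.array([0.2, 0.7]))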
160 changes: 122 additions & 38 deletions utils/surface/algorithms.py
@@ -9,6 +9,8 @@
from collections import defaultdict

from mathutils import Matrix, Vector
from datetime import datetime


from sverchok.utils.math import (
ZERO, FRENET, HOUSEHOLDER, TRACK, DIFF, TRACK_NORMAL,
@@ -159,59 +161,141 @@ def normal(self, u, v):
# return np.array(normals)

def normal_array(self, us, vs):
result_normals, *_ = self.normal_array_with_source_vertices(us, vs)
return result_normals

def normal_array_with_source_vertices(self, us, vs):
h = 0.001
result = np.empty((len(us), 3))
t7=datetime.now()
_points = np.empty( (0, 3), dtype=np.float64)
_points_u_h = np.empty( (0, 3), dtype=np.float64)
_points_v_h = np.empty( (0, 3), dtype=np.float64)
v_to_u = defaultdict(list)
v_to_i = defaultdict(list)
for i, (u, v) in enumerate(zip(us, vs)):
v_to_u[v].append(u)
v_to_i[v].append(i)
for v, us_by_v in v_to_u.items():
us_by_v = np.array(us_by_v)
is_by_v = v_to_i[v]
v_to_i_flatten = np.hstack(np.array( list(v_to_i.values())).flatten())
t7 = datetime.now()-t7
t1 = datetime.now()-datetime.now()
t2 = datetime.now()-datetime.now()
t3 = datetime.now()-datetime.now()
t5 = datetime.now()-datetime.now()
t6 = datetime.now()-datetime.now()
t8 = datetime.now()-datetime.now()

t4 = datetime.now()
list_spline_v = []
list_spline_h = []
_v = np.array( list(v_to_u.keys()), dtype=np.float64 )
for i_spline, v_spline in enumerate(self.v_splines):
v_min, v_max = v_spline.get_u_bounds()
_vx = (v_max-v_min)*_v+v_min
_list_v_i = np.where( _vx+h<v_max, _vx , _vx-h)
_list_h_i = np.where( _vx+h<v_max, _vx+h, _vx )
list_spline_v.append( _list_v_i )
list_spline_h.append( _list_h_i )

t4 = datetime.now()-t4

r_v = []
r_h = []
t0 = datetime.now()
for i, v_spline in enumerate(self.v_splines):
_r_v, _r_h = v_spline.evaluate_array( np.concatenate( (list_spline_v[i], list_spline_h[i]) )).reshape(2,-1,3) # to increase performance for one call
r_v.append( _r_v )
r_h.append( _r_h )
t0 = datetime.now()-t0

u_min, u_max = 0.0, 1.0

# ddd = dict( enumerate(v_to_u.items()) )
# def normal_calc(i_on_spline):
# v, _us_by_v = ddd[i_on_spline]
# us_by_v = np.array(_us_by_v)
# spline_vertices = []
# spline_vertices_h = []

# for i_spline, v_spline in enumerate(self.v_splines):
# point_v = r_v[i_spline][i_on_spline]
# point_h = r_h[i_spline][i_on_spline]
# spline_vertices.append(point_v)
# spline_vertices_h.append(point_h)

# if v+h <= v_max:
# u_spline = self.get_u_spline(v , spline_vertices )
# u_spline_h = self.get_u_spline(v+h, spline_vertices_h)
# else:
# u_spline = self.get_u_spline(v-h, spline_vertices )
# u_spline_h = self.get_u_spline(v , spline_vertices_h)

# good_us = us_by_v + h < u_max
# us_v_gb = np.where( good_us, us_by_v , us_by_v-h )
# us_h_gb = np.where( good_us, us_by_v+h, us_by_v )

# points, points_u_h = u_spline.evaluate_array( np.concatenate( (us_v_gb, us_h_gb) ) ).reshape(2,-1,3) # to increase performance for one call
# points_v_h = u_spline_h.evaluate_array(us_by_v)
# res = (points, points_u_h, points_v_h)
# return res

# t10 = datetime.now()
# normal_calc_vectorize = np.vectorize( normal_calc, otypes=[np.object] )
# rrr = normal_calc_vectorize( np.array(np.arange(len(v_to_i), dtype=np.int32)) )
# t10 = datetime.now()-t10
# print(f't10={t10}')

for i_on_spline, (v, _us_by_v) in enumerate(v_to_u.items()):
us_by_v = np.array(_us_by_v)
#i_by_v = v_to_i[v]
spline_vertices = []
spline_vertices_h = []
for v_spline in self.v_splines:
v_min, v_max = v_spline.get_u_bounds()
vx = (v_max - v_min) * v + v_min
if vx +h <= v_max:
point = v_spline.evaluate(vx)
point_h = v_spline.evaluate(vx + h)
else:
point = v_spline.evaluate(vx - h)
point_h = v_spline.evaluate(vx)
spline_vertices.append(point)

dt1 = datetime.now()
for i_spline, v_spline in enumerate(self.v_splines):
point_v = r_v[i_spline][i_on_spline]
point_h = r_h[i_spline][i_on_spline]
spline_vertices.append(point_v)
spline_vertices_h.append(point_h)
dt1 = datetime.now()-dt1
t1 = t1+dt1

dt2 = datetime.now()
if v+h <= v_max:
u_spline = self.get_u_spline(v, spline_vertices)
u_spline = self.get_u_spline(v , spline_vertices )
u_spline_h = self.get_u_spline(v+h, spline_vertices_h)
else:
u_spline = self.get_u_spline(v-h, spline_vertices)
u_spline_h = self.get_u_spline(v, spline_vertices_h)
u_min, u_max = 0.0, 1.0
u_spline = self.get_u_spline(v-h, spline_vertices )
u_spline_h = self.get_u_spline(v , spline_vertices_h)
dt2 = datetime.now()-dt2
t2 = t2+dt2

dt3 = datetime.now()
good_us = us_by_v + h < u_max
bad_us = np.logical_not(good_us)

good_points = np.broadcast_to(good_us[np.newaxis].T, (len(us_by_v), 3)).flatten()
bad_points = np.logical_not(good_points)
points = np.empty((len(us_by_v), 3))
points[good_us] = u_spline.evaluate_array(us_by_v[good_us])
points[bad_us] = u_spline.evaluate_array(us_by_v[bad_us] - h)
points_u_h = np.empty((len(us_by_v), 3))
points_u_h[good_us] = u_spline.evaluate_array(us_by_v[good_us] + h)
points_u_h[bad_us] = u_spline.evaluate_array(us_by_v[bad_us])
points_v_h = u_spline_h.evaluate_array(us_by_v)

dvs = (points_v_h - points) / h
dus = (points_u_h - points) / h
normals = np.cross(dus, dvs)
norms = np.linalg.norm(normals, axis=1, keepdims=True)
normals = normals / norms
us_v_gb = np.where( good_us, us_by_v , us_by_v-h )
us_h_gb = np.where( good_us, us_by_v+h, us_by_v )

idxs = np.array(is_by_v)[np.newaxis].T
np.put_along_axis(result, idxs, normals, axis=0)
return result
points, points_u_h = u_spline.evaluate_array( np.concatenate( (us_v_gb, us_h_gb) ) ).reshape(2,-1,3) # to increase performance for one call
points_v_h = u_spline_h.evaluate_array(us_by_v)
_points = np.concatenate( (_points, points) )
_points_u_h = np.concatenate( (_points_u_h, points_u_h) )
_points_v_h = np.concatenate( (_points_v_h, points_v_h) )
dt3 = datetime.now()-dt3
t3 = t3+dt3

t8 = datetime.now()
_dvs = (_points_v_h - _points)/h
_dus = (_points_u_h - _points)/h
_normals = np.cross(_dus, _dvs)
_norms = np.linalg.norm(_normals, axis=1, keepdims=True)
_normals = _normals / _norms
t8 = datetime.now()-t8

t9=datetime.now()
_result_normals = _normals[np.argsort(v_to_i_flatten)]
_result_point = _points[np.argsort(v_to_i_flatten)]
t9=datetime.now()-t9
print(f't7={t7}; t4={t4}; t0={t0}; t1={t1}; t2={t2}; t3={t3}; t8={t8}; t9={t9} summa: {t7+t4+t0+t1+t2+t3+t5+t6+t8+t9}')
return _result_normals, _result_point

PROJECT = 'project'
COPROJECT = 'coproject'
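The rewritten normal_array_with_source_vertices gains its speed from two moves: query points are grouped by their v value so each spline evaluation happens on large concatenated parameter arrays (one evaluate_array call instead of many), and the caller's original point order is restored at the end with argsort over the collected indices (v_to_i_flatten in the diff). A stripped-down sketch of that group, batch, and reorder pattern, with a dummy evaluation standing in for the spline calls:

import numpy as np
from collections import defaultdict

us = np.array([0.1, 0.9, 0.4, 0.3])
vs = np.array([0.2, 0.2, 0.7, 0.2])

# Group query points by v, remembering each point's original index.
v_to_u, v_to_i = defaultdict(list), defaultdict(list)
for i, (u, v) in enumerate(zip(us, vs)):
    v_to_u[v].append(u)
    v_to_i[v].append(i)

results = []
for v, us_by_v in v_to_u.items():
    us_by_v = np.array(us_by_v)
    # Stand-in for one batched evaluate_array call per group of points.
    results.append(np.dstack((us_by_v, np.full_like(us_by_v, v), us_by_v * v))[0])

# Results are ordered group by group; argsort over the collected original
# indices maps them back to the input order, as the diff does with v_to_i_flatten.
flat_indices = np.concatenate([np.array(idxs) for idxs in v_to_i.values()])
points = np.concatenate(results)[np.argsort(flat_indices)]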