Commit 57577b1

Introduce a (barely) working InflationGrader
Added Distributor classes that calculate the best cell distribution and an Approximator class that decides which Chops to choose to meet it; rewrote SmoothGrader and based the whole InflationGrader on those. Fixed a bug in grading relations (a border case with count=1).

To do:
- handle cases where both ends of a Wire are on walls
- write tests!
- refactor: organize autograding source and its hierarchy, and their respective tests
- get rid of repeated code (LayerStack in Params and Distributor)
- add direct imports
- test/use graders with some serious examples
1 parent 022c19a commit 57577b1

11 files changed: +385 -117 lines

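Background for the diffs below: a blockMesh edge grading is described by Chops (cell count, total expansion, length ratio), which can only produce geometrically growing cells within each chop. The new Approximator therefore has to split an arbitrary optimized cell distribution into pieces that each look roughly geometric. A minimal numpy sketch of what a single chop reproduces, mirroring the Piece.get_chop_coords() logic added in this commit (the sizes are made-up illustration values):

import numpy as np

# an "ideal" piece: 5 cells growing from size 0.01 to 0.03 (illustration values)
start_size, end_size, cell_count = 0.01, 0.03, 5

# a single chop can only produce geometrically growing cell sizes
sizes = np.geomspace(start_size, end_size, num=cell_count)
coords = np.concatenate(([0.0], np.cumsum(sizes)))

# this is what goes into Chop(count=..., total_expansion=...)
total_expansion = end_size / start_size

print(coords)
print(total_expansion)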

examples/advanced/inflation_grader.py

Lines changed: 10 additions & 3 deletions
@@ -12,13 +12,20 @@
 shape = cb.ExtrudedShape(base, 1)

 # turn one block around to test grader's skillz
-shape.grid[1][0].rotate(np.pi, [0, 0, 1])
+shape.grid[0][1].rotate(np.pi, [0, 0, 1])

 mesh.add(shape)

 mesh.set_default_patch("boundary", "patch")
-for i in (0, 1, 2):
+for i in (0, 2):
     shape.operations[i].set_patch("front", "walls")
+shape.operations[1].set_patch("back", "walls")
+
+for i in (3, 4, 5):
+    shape.operations[i].set_patch("back", "walls")
+
+for i in (0, 3):
+    shape.operations[i].set_patch("left", "walls")

 mesh.modify_patch("walls", "wall")
@@ -32,7 +39,7 @@
 vertex = list(finder.find_in_sphere(point))[0]
 vertex.translate([0, 0.8, 0])

-grader = InflationGrader(mesh, 1e-2, 0.1)
+grader = InflationGrader(mesh, 5e-3, 0.1)
 grader.grade(take="max")

 mesh.write(os.path.join("..", "case", "system", "blockMeshDict"), debug_path="debug.vtk")
src/classy_blocks/grading/autograding/params/approximator.py

Lines changed: 145 additions & 0 deletions
@@ -0,0 +1,145 @@
+import dataclasses
+import itertools
+from typing import List, Sequence
+
+import numpy as np
+
+from classy_blocks.grading.chop import Chop
+from classy_blocks.types import FloatListType
+from classy_blocks.util.constants import TOL
+
+
+@dataclasses.dataclass
+class Piece:
+    coords: FloatListType
+
+    @property
+    def point_count(self) -> int:
+        return len(self.coords)
+
+    @property
+    def cell_count(self) -> int:
+        return self.point_count - 1
+
+    @property
+    def start_size(self) -> float:
+        return abs(self.coords[1] - self.coords[0])
+
+    @property
+    def end_size(self) -> float:
+        return abs(self.coords[-1] - self.coords[-2])
+
+    @property
+    def length(self) -> float:
+        return abs(self.coords[-1] - self.coords[0])
+
+    @property
+    def total_expansion(self) -> float:
+        return self.end_size / self.start_size
+
+    @property
+    def is_simple(self) -> bool:
+        """If all sizes are equal, this is a simple grading case"""
+        return abs(self.start_size - self.end_size) < TOL
+
+    def get_chop_coords(self) -> FloatListType:
+        """Calculates coordinates as will be produced by a Chop"""
+        if self.is_simple:
+            return np.linspace(self.coords[0], self.coords[-1], num=self.point_count)
+
+        sizes = np.geomspace(self.start_size, self.end_size, num=self.cell_count)
+        coords = np.ones(self.point_count) * self.coords[0]
+
+        add = np.cumsum(sizes)
+
+        if self.coords[-1] < self.coords[0]:
+            add *= -1
+
+        coords[1:] += add
+
+        return coords
+
+    def get_fitness(self) -> float:
+        differences = (self.coords - self.get_chop_coords()) ** 2
+        return float(np.sum(differences) / self.cell_count)
+
+    def get_chop(self) -> Chop:
+        return Chop(
+            length_ratio=self.length,
+            count=self.cell_count,
+            total_expansion=self.total_expansion,
+        )
+
+
+class Approximator:
+    """Takes a list of arbitrary cell sizes and creates Chop objects
+    that blockMesh will understand; Tries to minimize differences
+    between the rudimentary total expansion + count and actual
+    desired (optimized) cell sizes.
+
+    In a possible future scenario where a custom mesher would be employed,
+    actual cell sizes could be used without intervention of this object."""
+
+    def __init__(self, coords: FloatListType):
+        self.coords = coords
+        self.sizes = np.diff(self.coords)
+        self.ratios = np.roll(self.sizes, -1)[:-1] / self.sizes[:-1]
+        self.count = len(coords) - 1
+
+    @property
+    def length(self) -> float:
+        return abs(self.coords[-1] - self.coords[0])
+
+    @property
+    def is_simple(self) -> bool:
+        """Returns True if this is a simple one-chop equal-size scenario"""
+        return max(self.sizes) - min(self.sizes) < TOL
+
+    def get_pieces(self, indexes: List[int]) -> List[Piece]:
+        """Creates Piece objects between given indexes (adds 0 and -1 automatically);
+        does not check if count is smaller than length of indexes"""
+        indexes = [0, *indexes]
+
+        pieces = [Piece(self.coords[indexes[i] : indexes[i + 1] + 1]) for i in range(len(indexes) - 1)]
+        pieces.append(Piece(self.coords[indexes[-1] :]))
+
+        return pieces
+
+    def get_chops(self, number: int) -> List[Chop]:
+        if self.is_simple:
+            # don't bother
+            return [Chop(count=self.count)]
+
+        # limit number of chops so there's no zero-cell chops
+        number = min(number + 1, self.count - 1)
+
+        # brute force: choose the best combination of chops
+        # but don't overdo it; just try a couple of scenarios
+        refinement = 4
+        indexes = np.linspace(0, self.count + 1, num=number * refinement, dtype="int")[1:-1]
+
+        best_fit = 1e12
+        best_scenario: Sequence[int] = [0] * (number - 1)
+
+        combinations = itertools.combinations(indexes, r=number - 2)
+        for scenario in combinations:
+            try:
+                pieces = self.get_pieces(scenario)  # type:ignore
+                fit = sum(piece.get_fitness() for piece in pieces)
+                if fit < best_fit:
+                    best_fit = fit
+                    best_scenario = scenario
+            except (IndexError, ValueError):
+                # obviously not the best scenario, eh?
+                continue
+
+        pieces = self.get_pieces(best_scenario)  # type:ignore
+
+        chops = [piece.get_chop() for piece in pieces]
+
+        # normalize length ratios
+        length = self.length
+        for chop in chops:
+            chop.length_ratio = chop.length_ratio / length
+
+        return chops
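A small usage sketch of the new class, assuming the import path used by distributor.py below; the coordinate array is made up for illustration (fine cells at both ends, coarse in the middle):

import numpy as np

from classy_blocks.grading.autograding.params.approximator import Approximator

# made-up, non-uniform cell sizes along a wire
sizes = np.concatenate(
    (np.geomspace(0.005, 0.02, num=10), np.full(10, 0.02), np.geomspace(0.02, 0.005, num=10))
)
coords = np.concatenate(([0.0], np.cumsum(sizes)))

approximator = Approximator(coords)

# ask for roughly 3 chops; each carries length_ratio, count and total_expansion
chops = approximator.get_chops(3)
print(chops)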

src/classy_blocks/grading/autograding/params/distributor.py

Lines changed: 36 additions & 63 deletions
@@ -6,7 +6,7 @@
 import scipy.interpolate
 import scipy.optimize

-from classy_blocks.grading.autograding.params.base import sum_length
+from classy_blocks.grading.autograding.params.approximator import Approximator
 from classy_blocks.grading.autograding.params.layers import InflationLayer
 from classy_blocks.grading.chop import Chop
 from classy_blocks.types import FloatListType
@@ -17,7 +17,7 @@
 class DistributorBase(abc.ABC):
     """Algorithm that creates chops from given count and sizes;
     first distributes cells using a predefined 'ideal' sizes and ratios,
-    then optimizes their individual size to get as close to that ideal as possible.
+    then optimizes their *individual* size to get as close to that ideal as possible.

     Then, when cells are placed, Chops are created so that they produce as similar
     cells to the calculated as possible. Since blockMesh only supports equal
@@ -32,10 +32,10 @@ class DistributorBase(abc.ABC):

     @staticmethod
     def get_actual_ratios(coords: FloatListType) -> FloatListType:
+        # TODO: repeated code within Approximator! un-repeat
         lengths = np.diff(coords)
-        return (lengths[:-1] / np.roll(lengths, -1)[:-1]) ** -1
+        return np.roll(lengths, -1)[:-1] / lengths[:-1]

-    @abc.abstractmethod
     def get_ideal_ratios(self) -> FloatListType:
         """Returns desired cell-to-cell ratios"""
         return np.ones(self.count + 1)
@@ -45,7 +45,9 @@ def get_ratio_weights(self) -> FloatListType:
         """Returns weights of cell ratios"""

     def get_raw_coords(self) -> FloatListType:
-        # 'count' denotes number of 'intervals' so there must be another point
+        # 'count' denotes number of 'intervals' so add one more point to get points;
+        # first and last cells are added artificially ('ghost cells') to calculate
+        # proper expansion ratios and sizes
         return np.concatenate(
             ([-self.size_before], np.linspace(0, self.length, num=self.count + 1), [self.length + self.size_after])
         )
@@ -60,69 +62,31 @@ def get_smooth_coords(self) -> FloatListType:
         def ratios(inner_coords):
             coords[2:-2] = inner_coords

-            difference = self.get_actual_ratios(coords) - self.get_ideal_ratios()
-            return difference * self.get_ratio_weights()
+            # to prevent 'flipping' over and producing zero or negative length, scale with e^-ratio
+            difference = -(self.get_ratio_weights() * (self.get_actual_ratios(coords) - self.get_ideal_ratios()))
+            return np.exp(difference) - 1

-        scale = min(self.size_before, self.size_after, self.length / self.count) / 10
-        _ = scipy.optimize.least_squares(ratios, coords[2:-2], method="lm", ftol=scale / 100, x_scale=scale)
+        scale = min(self.size_before, self.size_after, self.length / self.count) / 100
+        _ = scipy.optimize.least_squares(ratios, coords[2:-2], ftol=scale / 10, x_scale=scale)

-        return coords
+        # omit the 'ghost' cells
+        return coords[1:-1]

     @property
     def is_simple(self) -> bool:
         # Don't overdo basic, simple-graded blocks
         base_size = self.length / self.count

         # TODO: use a more relaxed criterion?
-        return base_size - self.size_before < TOL and base_size - self.size_after < TOL
+        return base_size - self.size_before < TOL and base_size - self.size_after < 10 * TOL

     def get_chops(self, pieces: int) -> List[Chop]:
-        if self.is_simple:
-            return [Chop(count=self.count)]
+        approximator = Approximator(self.get_smooth_coords())
+        return approximator.get_chops(pieces)

+    def get_last_size(self) -> float:
         coords = self.get_smooth_coords()
-        sizes = np.diff(coords)
-        ratios = self.get_actual_ratios(coords)
-
-        count = len(ratios) - 1
-
-        # create a piecewise linear function from a number of chosen indexes and their respective c2c ratios
-        # then optimize indexes to obtain best fit
-        # fratios = scipy.interpolate.interp1d(range(len(ratios)), ratios)
-
-        # def get_piecewise(indexes: List[int]) -> Callable:
-        #     values = np.take(ratios, indexes)
-        #     return scipy.interpolate.interp1d(indexes, values)
-
-        # def get_fitness(indexes: List[int]) -> float:
-        #     fitted = get_piecewise(indexes)(range(len(ratios)))
-
-        #     ss_tot = np.sum((ratios - np.mean(ratios)) ** 2)
-        #     ss_res = np.sum((ratios - fitted) ** 2)
-
-        #     return 1 - (ss_res / ss_tot)
-
-        # print(get_fitness([0, 9, 10, count]))
-        # print(get_fitness(np.linspace(0, count, num=pieces + 1, dtype=int)))
-
-        chops: List[Chop] = []
-        indexes = np.linspace(0, count, num=pieces + 1, dtype=int)
-
-        for i, index in enumerate(indexes[:-1]):
-            chop_ratios = ratios[index : indexes[i + 1]]
-            ratio = np.prod(chop_ratios)
-            chop_count = indexes[i + 1] - indexes[i]
-            avg_ratio = ratio ** (1 / chop_count)
-            length = sum_length(sizes[index], chop_count, avg_ratio)
-            chop = Chop(length_ratio=length, total_expansion=ratio, count=chop_count)
-            chops.append(chop)
-
-        # normalize length ratios
-        sum_ratio = sum([chop.length_ratio for chop in chops])
-        for chop in chops:
-            chop.length_ratio = chop.length_ratio / sum_ratio
-
-        return chops
+        return coords[-2] - coords[-3]


 @dataclasses.dataclass
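A note on the new residual in get_smooth_coords() above: the optimizer no longer minimizes the raw ratio difference but exp(-weight * (actual - ideal)) - 1, which behaves roughly linearly for small deviations, stays bounded in magnitude when a ratio grows past the ideal, and explodes when a cell-to-cell ratio drops towards zero or negative, the situation that precedes points 'flipping' over each other. A small illustration of that asymmetry (illustrative numbers only):

import numpy as np

weight = 1.0
ideal = 1.0

for actual in (0.5, 0.9, 1.0, 1.1, 2.0, -1.0):  # -1.0 ~ a 'flipped', negative-length cell
    difference = -(weight * (actual - ideal))
    residual = np.exp(difference) - 1
    print(f"actual ratio {actual:5.1f} -> residual {residual:8.3f}")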
@@ -132,8 +96,9 @@ def get_ideal_ratios(self):
         return super().get_ideal_ratios()

     def get_ratio_weights(self):
-        # Enforce stricter policy on size_before and size_after
         weights = np.ones(self.count + 1)
+        # Enforce stricter policy on the first few cells
+        # to match size_before and size_after
         for i in (0, 1, 2, 3):
             w = 2 ** (3 - i)
             weights[i] = w
@@ -146,26 +111,34 @@ def get_ratio_weights(self):
 class InflationDistributor(SmoothDistributor):
     c2c_expansion: float
     bl_thickness_factor: int
+    buffer_expansion: float
+    bulk_size: float

     @property
     def is_simple(self) -> bool:
         return False

     def get_ideal_ratios(self):
+        # TODO: combine this logic and LayerStack;
+        # possibly package all parameters into a separate dataclass
+        ratios = super().get_ideal_ratios()
+
         # Ideal growth ratio in boundary layer is user-specified c2c_expansion;
         inflation_layer = InflationLayer(self.size_before, self.c2c_expansion, self.bl_thickness_factor, 1e12)
         inflation_count = inflation_layer.count
-        print(f"Inflation count: {inflation_count}")
-
-        ratios = super().get_ideal_ratios()

         ratios[:inflation_count] = self.c2c_expansion
-        print(ratios)
+
+        # add a buffer layer if needed
+        last_inflation_size = inflation_layer.end_size
+        if self.bulk_size > self.buffer_expansion * last_inflation_size:
+            buffer_count = int(np.log(self.bulk_size / last_inflation_size) / np.log(self.buffer_expansion)) + 1
+            ratios[inflation_count : inflation_count + buffer_count] = self.buffer_expansion

         return ratios

     def get_ratio_weights(self):
-        return super().get_ratio_weights()
-
-    def _get_ratio_weights(self):
+        # using the same weights as in SmoothDistributor
+        # can trigger overflow warnings but doesn't produce
+        # better chops; thus, keep it simple
         return np.ones(self.count + 1)
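The buffer-layer count above follows from letting cells grow from the last inflation-layer cell to the bulk size at buffer_expansion per cell: the number of growth steps needed is log(bulk_size / last_inflation_size) / log(buffer_expansion), which the code rounds with int(...) + 1. A quick numeric check with illustrative values (not taken from the example case):

import numpy as np

last_inflation_size = 0.002   # size of the last cell in the inflation layer
bulk_size = 0.01              # desired cell size away from the wall
buffer_expansion = 1.5        # growth ratio inside the buffer layer

buffer_count = int(np.log(bulk_size / last_inflation_size) / np.log(buffer_expansion)) + 1
print(buffer_count)  # 4: sizes grow 0.002 -> 0.003 -> 0.0045 -> 0.00675 -> 0.0101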
