[tuner]: retire data class GPUPipelineOptions, use iree_gpu.PipelineOptionsAttr. (#626)

This PR is part of the task tracked in #453: use IREE bindings for
compilation info (including lowering_config and translation_info).

Retire the data class GpuPipelineOptions and use the Python binding
iree_gpu.PipelineOptionsAttr instead.
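
For illustration, a minimal before/after sketch of the usage change
(assuming an MLIR context created through iree.compiler's bindings, which
have the IREE dialects available; the commented-out line shows the retired
class for contrast):

    from iree.compiler import ir
    from iree.compiler.dialects import iree_gpu

    with ir.Context():
        # Before: a hand-rolled data class that common.py stringified manually.
        # options = common.GpuPipelineOptions(prefetch_shared_memory=True)

        # After: the attribute is constructed and printed by the IREE bindings.
        options = iree_gpu.PipelineOptionsAttr.get(prefetch_shared_memory=True)
        print(options)  # #iree_gpu.pipeline_options<prefetch_shared_memory = true>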

---------

Signed-off-by: Bangtian Liu <liubangtian@gmail.com>
bangtianliu authored Nov 28, 2024
1 parent e2276d0 commit ba8dd7d
Showing 5 changed files with 38 additions and 58 deletions.
23 changes: 13 additions & 10 deletions tuner/tuner/candidate_gen_test.py
@@ -55,7 +55,9 @@ def test_apply_params_mmt(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[8, 8, 8],
subgroup_m_count=16,
subgroup_n_count=16,
gpu_pipeline_options=common.GpuPipelineOptions(prefetch_shared_memory=True),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(
prefetch_shared_memory=True
),
waves_per_eu=8,
)

@@ -109,8 +111,10 @@ def test_apply_params_conv(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[464, 320, 16],
subgroup_m_count=1,
subgroup_n_count=4,
gpu_pipeline_options=common.GpuPipelineOptions(
reorder_workgroups_strategy=common.ReorderWorkgroupsStrategy.TRANSPOSE
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(
reorder_workgroups_strategy=iree_gpu.ReorderWorkgroupsStrategyAttr.get(
iree_gpu.ReorderWorkgroupsStrategy.Transpose
)
),
waves_per_eu=2,
)
@@ -131,7 +135,6 @@ def test_apply_params_conv(tuner_ctx: common.TunerContext) -> None:

assert modified
modified = remove_comments(modified)

assert embeddable
assert (
"intrinsic = #iree_gpu.mma_layout<MFMA_F32_16x16x16_F16>, subgroup_m_count = 1, subgroup_n_count = 4"
@@ -143,7 +146,7 @@ def test_apply_params_conv(tuner_ctx: common.TunerContext) -> None:
)
assert "tile_sizes = [[1, 1, 464, 320, 1, 1, 16]]" in modified
assert (
"gpu_pipeline_options = #iree_gpu.pipeline_options<reorder_workgroups_strategy = Transpose>"
"gpu_pipeline_options = #iree_gpu.pipeline_options<reorder_workgroups_strategy = <Transpose>>"
in modified
)
assert '{llvm_func_attrs = {"amdgpu-waves-per-eu" = "2"}' in modified
@@ -175,7 +178,7 @@ def test_apply_params_contract(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[480, 384, 32],
subgroup_m_count=1,
subgroup_n_count=4,
gpu_pipeline_options=common.GpuPipelineOptions(),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(),
waves_per_eu=2,
)

@@ -224,7 +227,7 @@ def test_apply_params_batch_matmul(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[416, 320, 128],
subgroup_m_count=2,
subgroup_n_count=2,
gpu_pipeline_options=common.GpuPipelineOptions(),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(),
waves_per_eu=2,
)

@@ -276,7 +279,7 @@ def test_apply_params_batch_mmt_float(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[128, 64, 128],
subgroup_m_count=2,
subgroup_n_count=2,
gpu_pipeline_options=common.GpuPipelineOptions(),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(),
waves_per_eu=2,
)

@@ -326,7 +329,7 @@ def test_apply_params_batch_mmt_int(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[128, 64, 128],
subgroup_m_count=2,
subgroup_n_count=2,
gpu_pipeline_options=common.GpuPipelineOptions(),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(),
waves_per_eu=4,
)

@@ -399,7 +402,7 @@ def test_apply_params_broadcast_rhs_mmt(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[128, 64, 128],
subgroup_m_count=2,
subgroup_n_count=2,
gpu_pipeline_options=common.GpuPipelineOptions(),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(),
waves_per_eu=4,
)

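Note on the expected-string change above: reorder_workgroups_strategy is now
itself an attribute (iree_gpu.ReorderWorkgroupsStrategyAttr), so it prints
with nested angle brackets, <Transpose> rather than Transpose. A small
construction sketch (assuming an active MLIR context with the IREE dialects):

    strategy = iree_gpu.ReorderWorkgroupsStrategyAttr.get(
        iree_gpu.ReorderWorkgroupsStrategy.Transpose
    )
    options = iree_gpu.PipelineOptionsAttr.get(reorder_workgroups_strategy=strategy)
    print(options)
    # #iree_gpu.pipeline_options<reorder_workgroups_strategy = <Transpose>>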
37 changes: 5 additions & 32 deletions tuner/tuner/common.py
@@ -114,35 +114,6 @@ def __str__(self) -> str:
return self.name.title()


@dataclass
class GpuPipelineOptions:
"""Represents the `iree_gpu.pipeline_options` attribute"""

prefetch_shared_memory: Optional[bool] = None
no_reduce_shared_memory_bank_conflicts: Optional[bool] = None
reorder_workgroups_strategy: Optional[ReorderWorkgroupsStrategy] = None

def all_default(self) -> bool:
return all(x is None for x in astuple(self))

def __str__(self) -> str:
options: list[str] = []
if self.prefetch_shared_memory is not None:
options.append(
f"prefetch_shared_memory = {str(self.prefetch_shared_memory).lower()}"
)
if self.no_reduce_shared_memory_bank_conflicts is not None:
options.append(
f"no_reduce_shared_memory_bank_conflicts = {str(self.no_reduce_shared_memory_bank_conflicts).lower()}"
)
if self.reorder_workgroups_strategy is not None:
options.append(
f"reorder_workgroups_strategy = {self.reorder_workgroups_strategy}"
)

return f"#iree_gpu.pipeline_options<{', '.join(options)}>"


@dataclass
class Configuration:
subgroup_size: int
@@ -151,14 +122,16 @@ class Configuration:
tile_sizes: list[int]
subgroup_m_count: int
subgroup_n_count: int
gpu_pipeline_options: GpuPipelineOptions
gpu_pipeline_options: iree_gpu.PipelineOptionsAttr
waves_per_eu: int


def get_pipeline_config(configuration: Configuration) -> str:
extra_config = ""
if not configuration.gpu_pipeline_options.all_default():
extra_config += f", gpu_pipeline_options = {configuration.gpu_pipeline_options}"
pipeline_options = configuration.gpu_pipeline_options
if pipeline_options != iree_gpu.PipelineOptionsAttr.get():
extra_config += f", gpu_pipeline_options = {pipeline_options}"

if configuration.waves_per_eu != 2:
extra_config += f', llvm_func_attrs = {{"amdgpu-waves-per-eu" = "{configuration.waves_per_eu}"}}'
return extra_config
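Because MLIR attributes are uniqued within a context, the new default check
in get_pipeline_config reduces to a plain equality test against a freshly
built PipelineOptionsAttr. A rough behavioral sketch (illustrative values,
not part of the diff):

    default_opts = iree_gpu.PipelineOptionsAttr.get()
    assert default_opts == iree_gpu.PipelineOptionsAttr.get()  # uniqued, hence equal

    custom_opts = iree_gpu.PipelineOptionsAttr.get(prefetch_shared_memory=True)
    assert custom_opts != default_opts
    # Only a non-default attribute is appended to the pipeline config string:
    # ", gpu_pipeline_options = #iree_gpu.pipeline_options<prefetch_shared_memory = true>"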
28 changes: 16 additions & 12 deletions tuner/tuner/common_test.py
@@ -47,27 +47,29 @@ def test_get_shaped_type_to_str(tuner_ctx: common.TunerContext) -> None:
assert str(common.ShapedType([-1, 2, 3], tuner_ctx.type.f16)) == "?x2x3xf16"


def test_gpu_pipeline_options() -> None:
options = common.GpuPipelineOptions()
assert options.all_default()
def test_gpu_pipeline_options(tuner_ctx: common.TunerContext) -> None:
options = iree_gpu.PipelineOptionsAttr.get()
assert str(options) == "#iree_gpu.pipeline_options<>"

options.prefetch_shared_memory = True
assert not options.all_default()
options = iree_gpu.PipelineOptionsAttr.get(prefetch_shared_memory=True)
assert str(options) == "#iree_gpu.pipeline_options<prefetch_shared_memory = true>"

options.no_reduce_shared_memory_bank_conflicts = False
options = iree_gpu.PipelineOptionsAttr.get(
prefetch_shared_memory=True, no_reduce_shared_memory_bank_conflicts=False
)
assert (
str(options)
== "#iree_gpu.pipeline_options<prefetch_shared_memory = true, no_reduce_shared_memory_bank_conflicts = false>"
)

options = common.GpuPipelineOptions()
options.reorder_workgroups_strategy = common.ReorderWorkgroupsStrategy.TRANSPOSE
assert not options.all_default()
options = iree_gpu.PipelineOptionsAttr.get(
reorder_workgroups_strategy=iree_gpu.ReorderWorkgroupsStrategyAttr.get(
iree_gpu.ReorderWorkgroupsStrategy.Transpose
)
)
assert (
str(options)
== "#iree_gpu.pipeline_options<reorder_workgroups_strategy = Transpose>"
== "#iree_gpu.pipeline_options<reorder_workgroups_strategy = <Transpose>>"
)


@@ -81,7 +83,7 @@ def test_get_pipeline_config(mlir_ctx: ir.Context) -> None:
tile_sizes=[4, 8, 16],
subgroup_m_count=1,
subgroup_n_count=1,
gpu_pipeline_options=common.GpuPipelineOptions(),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(),
waves_per_eu=2,
)
config1_str: str = common.get_pipeline_config(config)
@@ -91,7 +93,9 @@ def test_get_pipeline_config(mlir_ctx: ir.Context) -> None:
config2_str: str = common.get_pipeline_config(config)
assert config2_str == ', llvm_func_attrs = {"amdgpu-waves-per-eu" = "4"}'

config.gpu_pipeline_options.prefetch_shared_memory = True
config.gpu_pipeline_options = iree_gpu.PipelineOptionsAttr.get(
prefetch_shared_memory=True
)
config3_str = common.get_pipeline_config(config)
assert (
config3_str
2 changes: 1 addition & 1 deletion tuner/tuner/dispatch_constraints.py
@@ -236,7 +236,7 @@ def generate_solutions(
[lookup(m), lookup(n), lookup(k)],
lookup(sg_m_cnt),
lookup(sg_n_cnt),
GpuPipelineOptions(),
iree_gpu.PipelineOptionsAttr.get(),
lookup(waves_per_eu),
)
solver.add(z3.simplify(z3.Not(z3.And(list(x == model[x] for x in all_vars)))))
6 changes: 3 additions & 3 deletions tuner/tuner/dispatch_parser_test.py
@@ -49,7 +49,7 @@ def test_get_mmt_tile_sizes(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[128, 320, 32],
subgroup_m_count=0,
subgroup_n_count=0,
gpu_pipeline_options=common.GpuPipelineOptions(),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(),
waves_per_eu=0,
)
assert dispatch_parser.get_mmt_tile_sizes(config) == [128, 320, 32]
@@ -65,7 +65,7 @@ def test_get_conv_tile_sizes(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[464, 320, 16],
subgroup_m_count=1,
subgroup_n_count=4,
gpu_pipeline_options=common.GpuPipelineOptions(),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(),
waves_per_eu=1,
)
assert dispatch_parser.ConvParser().get_conv_tile_sizes(config) == [
@@ -89,7 +89,7 @@ def test_get_contract_tile_sizes(tuner_ctx: common.TunerContext) -> None:
tile_sizes=[4, 8, 16],
subgroup_m_count=1,
subgroup_n_count=1,
gpu_pipeline_options=common.GpuPipelineOptions(),
gpu_pipeline_options=iree_gpu.PipelineOptionsAttr.get(),
waves_per_eu=2,
)
assert dispatch_parser.get_contract_tile_sizes(config, "mnk") == [4, 8, 16]
