Commit e66d1aa

Check extension support via ext_oneapi_supports_cl_extension (#667)

Add an experimental query that checks for the cl_bf16_conversion extension
LiyangLingIntel authored Jun 28, 2024
1 parent fd4fce2 commit e66d1aa
Showing 4 changed files with 27 additions and 1 deletion.
4 changes: 4 additions & 0 deletions csrc/gpu/aten/core/DeviceInfo.h
@@ -34,6 +34,10 @@ struct DeviceInfo {
uint32_t max_num_sub_groups;
std::vector<size_t> sub_group_sizes;
bool support_fp64;
bool support_cl_bf16_conversion;
bool support_cl_sg_matmul_acc;
bool support_cl_sg_matmul_acc_tf32;
bool support_cl_sg_2d_block_io;
};

} // namespace dpcpp
9 changes: 9 additions & 0 deletions csrc/gpu/runtime/Device.cpp
@@ -322,6 +322,11 @@ static void initDeviceProperty(DeviceId device_id) {
: 8;
device_prop.support_atomic64 = device.has(dpcpp_dev_aspect_atomic64);
device_prop.support_fp64 = device.has(dpcpp_dev_aspect_fp64);
sycl::ext::oneapi::experimental::cl_version version;
device_prop.support_cl_bf16_conversion = device.ext_oneapi_supports_cl_extension(
    "cl_intel_bfloat16_conversions", &version);
device_prop.support_cl_sg_matmul_acc = device.ext_oneapi_supports_cl_extension(
    "cl_intel_subgroup_matrix_multiply_accumulate", &version);
device_prop.support_cl_sg_matmul_acc_tf32 = device.ext_oneapi_supports_cl_extension(
    "cl_intel_subgroup_matrix_multiply_accumulate_tensor_float32", &version);
device_prop.support_cl_sg_2d_block_io = device.ext_oneapi_supports_cl_extension(
    "cl_intel_subgroup_2d_block_io", &version);

device_properties[device_id] = device_prop;

@@ -356,6 +361,10 @@ static void initDeviceProperty(DeviceId device_id) {
dev_info.max_num_sub_groups = device_prop.max_num_subgroup;
dev_info.sub_group_sizes = device_prop.subgroup_sizes;
dev_info.support_fp64 = device_prop.support_fp64;
dev_info.support_cl_bf16_conversion = device_prop.support_cl_bf16_conversion;
dev_info.support_cl_sg_matmul_acc = device_prop.support_cl_sg_matmul_acc;
dev_info.support_cl_sg_matmul_acc_tf32 = device_prop.support_cl_sg_matmul_acc_tf32;
dev_info.support_cl_sg_2d_block_io = device_prop.support_cl_sg_2d_block_io;
#if (defined(__INTEL_LLVM_COMPILER) && __INTEL_LLVM_COMPILER >= 20240100)
dev_info.device_arch = static_cast<uint64_t>(device_prop.device_arch);
#else
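For context, ext_oneapi_supports_cl_extension is an experimental DPC++ API on sycl::device: it returns true when the device's backend reports the named OpenCL extension, and writes the supported extension version through the pointer argument. A minimal standalone sketch of the same query pattern used above (assuming a recent DPC++ compiler that provides this experimental API and a visible GPU device):

#include <sycl/sycl.hpp>
#include <iostream>

int main() {
  sycl::device dev{sycl::gpu_selector_v};
  // Receives the version of the OpenCL extension when it is supported.
  sycl::ext::oneapi::experimental::cl_version version;
  // Returns true when the device's backend reports the extension.
  bool has_bf16 = dev.ext_oneapi_supports_cl_extension(
      "cl_intel_bfloat16_conversions", &version);
  std::cout << "cl_intel_bfloat16_conversions supported: "
            << (has_bf16 ? "yes" : "no") << "\n";
  return 0;
}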
4 changes: 4 additions & 0 deletions csrc/gpu/runtime/DeviceProp.h
@@ -143,6 +143,10 @@ struct DeviceProp {

bool support_fp64;
bool support_atomic64;
bool support_cl_bf16_conversion;
bool support_cl_sg_matmul_acc;
bool support_cl_sg_matmul_acc_tf32;
bool support_cl_sg_2d_block_io;
};

} // namespace dpcpp
11 changes: 10 additions & 1 deletion intel_extension_for_pytorch/csrc/xpu/Module.cpp
@@ -577,6 +577,10 @@ static void register_xpu_device_info(PyObject* module) {
.def_readonly("max_num_sub_groups", &DeviceInfo::max_num_sub_groups)
.def_readonly("sub_group_sizes", &DeviceInfo::sub_group_sizes)
.def_readonly("has_fp64", &DeviceInfo::support_fp64)
.def_readonly("support_cl_bf16_conversion", &DeviceInfo::support_cl_bf16_conversion)
.def_readonly("support_cl_sg_matmul_acc", &DeviceInfo::support_cl_sg_matmul_acc)
.def_readonly("support_cl_sg_matmul_acc_tf32", &DeviceInfo::support_cl_sg_matmul_acc_tf32)
.def_readonly("support_cl_sg_2d_block_io", &DeviceInfo::support_cl_sg_2d_block_io)
.def_readonly("device_arch", &DeviceInfo::device_arch)
.def_property_readonly(
"dev_type", [](const DeviceInfo& info) { return get_dev_type(info); })
@@ -589,7 +593,12 @@ static void register_xpu_device_info(PyObject* module) {
<< ", total_memory=" << info.global_mem_size / (1024 * 1024)
<< "MB, max_compute_units=" << info.max_compute_units
<< ", gpu_eu_count=" << info.gpu_eu_count
<< ", device_arch=" << info.device_arch << ")";
<< ", device_arch=" << info.device_arch
<< ", support_cl_bf16_conversion=" << info.support_cl_bf16_conversion
<< ", support_cl_sg_matmul_acc=" << info.support_cl_sg_matmul_acc
<< ", support_cl_sg_matmul_acc_tf32=" << info.support_cl_sg_matmul_acc_tf32
<< ", support_cl_sg_2d_block_io=" << info.support_cl_sg_2d_block_io
<< ")";
return stream.str();
});
}
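Since the four flags are registered as read-only attributes on the bound DeviceInfo, they become visible from Python. A hedged usage sketch, assuming intel_extension_for_pytorch exposes this DeviceInfo through torch.xpu.get_device_properties (the repr fields above suggest it does, but the exact entry point is an assumption here):

import torch
import intel_extension_for_pytorch  # noqa: F401  (registers the xpu backend)

# Assumed to return the DeviceInfo object bound in Module.cpp above.
info = torch.xpu.get_device_properties(0)
for flag in (
    "support_cl_bf16_conversion",
    "support_cl_sg_matmul_acc",
    "support_cl_sg_matmul_acc_tf32",
    "support_cl_sg_2d_block_io",
):
    print(flag, getattr(info, flag))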
