fix: clippy warnings from nightly rust 1.82 (#6348)
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
waynexia authored Sep 3, 2024
1 parent 97ae9d7 commit d4be752
Showing 9 changed files with 114 additions and 134 deletions.
2 changes: 1 addition & 1 deletion arrow-data/src/ffi.rs
@@ -324,6 +324,6 @@ mod tests {

assert_eq!(0, private_data.buffers_ptr.len());

- Box::into_raw(private_data);
+ let _ = Box::into_raw(private_data);
}
}
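The change above discards the pointer returned by `Box::into_raw` explicitly; the function is marked `#[must_use]`, so newer toolchains warn when its result is ignored. A minimal, self-contained sketch of the same pattern (not the crate's actual test):

```rust
// Sketch only: `Box::into_raw` is `#[must_use]`, so ignoring its result is
// flagged. Binding to `_` documents that dropping the pointer is intentional.
fn main() {
    let data = Box::new(vec![1u8, 2, 3]);
    let raw = Box::into_raw(data);

    // When the pointer is genuinely not needed, discard it explicitly:
    // `let _ = Box::into_raw(data);`

    // Here we reclaim the allocation so the sketch does not leak.
    // SAFETY: `raw` came from `Box::into_raw` and is not used afterwards.
    let _reclaimed = unsafe { Box::from_raw(raw) };
}
```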
6 changes: 3 additions & 3 deletions arrow-ipc/src/writer.rs
@@ -2572,7 +2572,7 @@ mod tests {
let mut fields = Vec::new();
let mut arrays = Vec::new();
for i in 0..num_cols {
- let field = Field::new(&format!("col_{}", i), DataType::Decimal128(38, 10), true);
+ let field = Field::new(format!("col_{}", i), DataType::Decimal128(38, 10), true);
let array = Decimal128Array::from(vec![num_cols as i128; num_rows]);
fields.push(field);
arrays.push(Arc::new(array) as Arc<dyn Array>);
@@ -2627,7 +2627,7 @@ mod tests {
let mut fields = Vec::new();
let mut arrays = Vec::new();
for i in 0..num_cols {
- let field = Field::new(&format!("col_{}", i), DataType::Decimal128(38, 10), true);
+ let field = Field::new(format!("col_{}", i), DataType::Decimal128(38, 10), true);
let array = Decimal128Array::from(vec![num_cols as i128; num_rows]);
fields.push(field);
arrays.push(Arc::new(array) as Arc<dyn Array>);
@@ -2682,7 +2682,7 @@ mod tests {
let mut fields = Vec::new();
let options = IpcWriteOptions::try_new(8, false, MetadataVersion::V5).unwrap();
for i in 0..num_cols {
- let field = Field::new(&format!("col_{}", i), DataType::Decimal128(38, 10), true);
+ let field = Field::new(format!("col_{}", i), DataType::Decimal128(38, 10), true);
fields.push(field);
}
let schema = Schema::new(fields);
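These three hunks drop a needless borrow: `Field::new` accepts `impl Into<String>`, so the freshly built `String` can be passed by value instead of as `&format!(...)`, which newer clippy flags. A hedged, standalone sketch of the fixed form (`arrow_schema` import assumed):

```rust
use arrow_schema::{DataType, Field};

// Sketch only: build a few Decimal128 fields the way the fixed tests do,
// handing the owned String from `format!` straight to `Field::new`.
fn decimal_fields(num_cols: usize) -> Vec<Field> {
    (0..num_cols)
        .map(|i| Field::new(format!("col_{i}"), DataType::Decimal128(38, 10), true))
        .collect()
}

fn main() {
    let fields = decimal_fields(3);
    assert_eq!(fields[2].name(), "col_2");
}
```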
2 changes: 1 addition & 1 deletion arrow-ord/src/sort.rs
@@ -403,7 +403,7 @@ fn sort_fixed_size_list(
}

#[inline(never)]
- fn sort_impl<T: ?Sized + Copy>(
+ fn sort_impl<T: Copy>(
options: SortOptions,
valids: &mut [(u32, T)],
nulls: &[u32],
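`Copy` already implies `Sized`, so the `?Sized` relaxation in the old bound had no effect and newer clippy reports it as redundant. A small illustration of the same signature cleanup (not the arrow-ord sort internals):

```rust
// Illustration only: `T: Copy` already forces `T: Sized`, so writing
// `T: ?Sized + Copy` relaxes nothing; keeping just `Copy` is equivalent.
fn first_or<T: Copy>(values: &[T], fallback: T) -> T {
    values.first().copied().unwrap_or(fallback)
}

fn main() {
    assert_eq!(first_or(&[3, 1, 2], 0), 3);
    assert_eq!(first_or::<u32>(&[], 7), 7);
}
```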
3 changes: 1 addition & 2 deletions arrow-schema/src/fields.rs
@@ -389,14 +389,13 @@ impl UnionFields {
let mut set = 0_u128;
type_ids
.into_iter()
- .map(|idx| {
+ .inspect(|&idx| {
let mask = 1_u128 << idx;
if (set & mask) != 0 {
panic!("duplicate type id: {}", idx);
} else {
set |= mask;
}
- idx
})
.zip(fields)
.collect()
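The closure passed to `map` only performed a side effect (duplicate detection via the bitmask) and returned its input unchanged, so newer clippy suggests `Iterator::inspect`, which makes the pass-through explicit. A hedged, standalone sketch of the same rewrite:

```rust
// Sketch only: track seen ids in a bitmask while letting values flow through.
// Because the closure returns its input untouched, `inspect` replaces `map`.
fn check_unique(type_ids: impl IntoIterator<Item = i8>) -> Vec<i8> {
    let mut set = 0u128;
    type_ids
        .into_iter()
        .inspect(|&idx| {
            let mask = 1u128 << idx;
            assert_eq!(set & mask, 0, "duplicate type id: {idx}");
            set |= mask;
        })
        .collect()
}

fn main() {
    assert_eq!(check_unique([0, 1, 5]), vec![0, 1, 5]);
}
```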
10 changes: 3 additions & 7 deletions parquet/src/arrow/arrow_reader/statistics.rs
@@ -568,13 +568,9 @@ macro_rules! make_data_page_stats_iterator {
let next = self.iter.next();
match next {
Some((len, index)) => match index {
- $index_type(native_index) => Some(
-     native_index
-         .indexes
-         .iter()
-         .map(|x| $func(x))
-         .collect::<Vec<_>>(),
- ),
+ $index_type(native_index) => {
+     Some(native_index.indexes.iter().map($func).collect::<Vec<_>>())
+ }
// No matching `Index` found;
// thus no statistics that can be extracted.
// We return vec![None; len] to effectively
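Besides tightening the formatting, this hunk replaces the closure `|x| $func(x)` with the function itself — the classic redundant-closure cleanup. A minimal sketch of the same idea outside the macro:

```rust
// Sketch only: `.map(|x| f(x))` is equivalent to `.map(f)` when the closure
// merely forwards its argument, which is what the macro now does with `$func`.
fn square(x: i64) -> i64 {
    x * x
}

fn main() {
    let squared: Vec<i64> = [1, 2, 3].iter().copied().map(square).collect();
    assert_eq!(squared, vec![1, 4, 9]);
}
```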
8 changes: 4 additions & 4 deletions parquet/src/arrow/arrow_writer/mod.rs
@@ -3082,8 +3082,8 @@ mod tests {
let min = byte_array_stats.min_opt().unwrap();
let max = byte_array_stats.max_opt().unwrap();

- assert_eq!(min.as_bytes(), &[b'a']);
- assert_eq!(max.as_bytes(), &[b'd']);
+ assert_eq!(min.as_bytes(), b"a");
+ assert_eq!(max.as_bytes(), b"d");
} else {
panic!("expecting Statistics::ByteArray");
}
@@ -3154,8 +3154,8 @@ mod tests {
let min = byte_array_stats.min_opt().unwrap();
let max = byte_array_stats.max_opt().unwrap();

- assert_eq!(min.as_bytes(), &[b'a']);
- assert_eq!(max.as_bytes(), &[b'd']);
+ assert_eq!(min.as_bytes(), b"a");
+ assert_eq!(max.as_bytes(), b"d");
} else {
panic!("expecting Statistics::ByteArray");
}
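The expected statistics are now written as byte-string literals (`b"a"`, `b"d"`) rather than slices of byte characters (`&[b'a']`), the form newer clippy nudges toward; both denote the same bytes. A quick standalone check of that equivalence:

```rust
// Sketch only: a byte-string literal and a slice of byte chars spell the
// same bytes; the literal form is what the updated assertions use.
fn main() {
    assert_eq!(b"a", &[b'a']);
    assert_eq!(b"d", &[b'd']);
    let stats_min: &[u8] = b"a";
    assert_eq!(stats_min, b"a");
}
```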
7 changes: 2 additions & 5 deletions parquet/src/data_type.rs
@@ -1298,11 +1298,8 @@ mod tests {

#[test]
fn test_byte_array_from() {
- assert_eq!(
-     ByteArray::from(vec![b'A', b'B', b'C']).data(),
-     &[b'A', b'B', b'C']
- );
- assert_eq!(ByteArray::from("ABC").data(), &[b'A', b'B', b'C']);
+ assert_eq!(ByteArray::from(b"ABC".to_vec()).data(), b"ABC");
+ assert_eq!(ByteArray::from("ABC").data(), b"ABC");
assert_eq!(
ByteArray::from(Bytes::from(vec![1u8, 2u8, 3u8, 4u8, 5u8])).data(),
&[1u8, 2u8, 3u8, 4u8, 5u8]
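The same byte-literal style is applied to the `ByteArray` round-trip test: `b"ABC".to_vec()` builds the backing `Vec<u8>` more tersely than `vec![b'A', b'B', b'C']`. A hedged sketch mirroring the updated test (uses the parquet crate's `ByteArray`, as the test does):

```rust
use parquet::data_type::ByteArray;

// Sketch only: construct a ByteArray from an owned Vec<u8> and from &str,
// then compare its bytes against byte-string literals.
fn main() {
    assert_eq!(b"ABC".to_vec(), vec![b'A', b'B', b'C']);
    assert_eq!(ByteArray::from(b"ABC".to_vec()).data(), b"ABC");
    assert_eq!(ByteArray::from("ABC").data(), b"ABC");
}
```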
41 changes: 16 additions & 25 deletions parquet/src/file/metadata/writer.rs
@@ -55,18 +55,14 @@ impl<'a, W: Write> ThriftMetadataWriter<'a, W> {
// write offset index to the file
for (row_group_idx, row_group) in self.row_groups.iter_mut().enumerate() {
for (column_idx, column_metadata) in row_group.columns.iter_mut().enumerate() {
- match &offset_indexes[row_group_idx][column_idx] {
-     Some(offset_index) => {
-         let start_offset = self.buf.bytes_written();
-         let mut protocol = TCompactOutputProtocol::new(&mut self.buf);
-         offset_index.write_to_out_protocol(&mut protocol)?;
-         let end_offset = self.buf.bytes_written();
-         // set offset and index for offset index
-         column_metadata.offset_index_offset = Some(start_offset as i64);
-         column_metadata.offset_index_length =
-             Some((end_offset - start_offset) as i32);
-     }
-     None => {}
+ if let Some(offset_index) = &offset_indexes[row_group_idx][column_idx] {
+     let start_offset = self.buf.bytes_written();
+     let mut protocol = TCompactOutputProtocol::new(&mut self.buf);
+     offset_index.write_to_out_protocol(&mut protocol)?;
+     let end_offset = self.buf.bytes_written();
+     // set offset and index for offset index
+     column_metadata.offset_index_offset = Some(start_offset as i64);
+     column_metadata.offset_index_length = Some((end_offset - start_offset) as i32);
}
}
}
@@ -84,18 +80,14 @@ impl<'a, W: Write> ThriftMetadataWriter<'a, W> {
// write column index to the file
for (row_group_idx, row_group) in self.row_groups.iter_mut().enumerate() {
for (column_idx, column_metadata) in row_group.columns.iter_mut().enumerate() {
- match &column_indexes[row_group_idx][column_idx] {
-     Some(column_index) => {
-         let start_offset = self.buf.bytes_written();
-         let mut protocol = TCompactOutputProtocol::new(&mut self.buf);
-         column_index.write_to_out_protocol(&mut protocol)?;
-         let end_offset = self.buf.bytes_written();
-         // set offset and index for offset index
-         column_metadata.column_index_offset = Some(start_offset as i64);
-         column_metadata.column_index_length =
-             Some((end_offset - start_offset) as i32);
-     }
-     None => {}
+ if let Some(column_index) = &column_indexes[row_group_idx][column_idx] {
+     let start_offset = self.buf.bytes_written();
+     let mut protocol = TCompactOutputProtocol::new(&mut self.buf);
+     column_index.write_to_out_protocol(&mut protocol)?;
+     let end_offset = self.buf.bytes_written();
+     // set offset and index for offset index
+     column_metadata.column_index_offset = Some(start_offset as i64);
+     column_metadata.column_index_length = Some((end_offset - start_offset) as i32);
}
}
}
@@ -524,7 +516,6 @@ mod tests {
async fn load_metadata_from_bytes(file_size: usize, data: Bytes) -> ParquetMetaData {
use crate::arrow::async_reader::{MetadataFetch, MetadataLoader};
use crate::errors::Result as ParquetResult;
- use bytes::Bytes;
use futures::future::BoxFuture;
use futures::FutureExt;
use std::ops::Range;
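Both hunks above collapse a `match` whose `None` arm was empty into `if let Some(...)`, clippy's long-standing `single_match`-style suggestion; the last hunk also drops a now-redundant `use bytes::Bytes;` inside the test helper, since `Bytes` is already in scope for the function signature. A small standalone sketch of the `match`-to-`if let` rewrite (hypothetical `Entry` type, not the parquet API):

```rust
// Sketch with a made-up `Entry` type: when the `None` arm does nothing,
// `if let` expresses the same logic with one less level of nesting.
struct Entry {
    offset: Option<i64>,
}

fn record_offsets(entries: &mut [Entry], new_offsets: &[Option<i64>]) {
    for (entry, maybe_offset) in entries.iter_mut().zip(new_offsets) {
        // Before: match maybe_offset { Some(o) => { entry.offset = Some(*o); } None => {} }
        if let Some(offset) = maybe_offset {
            entry.offset = Some(*offset);
        }
    }
}

fn main() {
    let mut entries = vec![Entry { offset: None }, Entry { offset: None }];
    record_offsets(&mut entries, &[Some(42), None]);
    assert_eq!(entries[0].offset, Some(42));
    assert_eq!(entries[1].offset, None);
}
```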
169 changes: 83 additions & 86 deletions parquet/src/schema/types.rs
@@ -294,105 +294,102 @@ impl<'a> PrimitiveTypeBuilder<'a> {
));
}

- match &self.logical_type {
-     Some(logical_type) => {
+ if let Some(logical_type) = &self.logical_type {
      // If a converted type is populated, check that it is consistent with
      // its logical type
      if self.converted_type != ConvertedType::NONE {
          if ConvertedType::from(self.logical_type.clone()) != self.converted_type {
              return Err(general_err!(
                  "Logical type {:?} is incompatible with converted type {} for field '{}'",
                  logical_type,
                  self.converted_type,
                  self.name
              ));
          }
      } else {
          // Populate the converted type for backwards compatibility
          basic_info.converted_type = self.logical_type.clone().into();
      }
      // Check that logical type and physical type are compatible
      match (logical_type, self.physical_type) {
          (LogicalType::Map, _) | (LogicalType::List, _) => {
              return Err(general_err!(
                  "{:?} cannot be applied to a primitive type for field '{}'",
                  logical_type,
                  self.name
              ));
          }
          (LogicalType::Enum, PhysicalType::BYTE_ARRAY) => {}
          (LogicalType::Decimal { scale, precision }, _) => {
              // Check that scale and precision are consistent with legacy values
              if *scale != self.scale {
                  return Err(general_err!(
                      "DECIMAL logical type scale {} must match self.scale {} for field '{}'",
                      scale,
                      self.scale,
                      self.name
                  ));
              }
              if *precision != self.precision {
                  return Err(general_err!(
                      "DECIMAL logical type precision {} must match self.precision {} for field '{}'",
                      precision,
                      self.precision,
                      self.name
                  ));
              }
              self.check_decimal_precision_scale()?;
          }
          (LogicalType::Date, PhysicalType::INT32) => {}
          (
              LogicalType::Time {
                  unit: TimeUnit::MILLIS(_),
                  ..
              },
              PhysicalType::INT32,
          ) => {}
          (LogicalType::Time { unit, .. }, PhysicalType::INT64) => {
              if *unit == TimeUnit::MILLIS(Default::default()) {
                  return Err(general_err!(
                      "Cannot use millisecond unit on INT64 type for field '{}'",
                      self.name
                  ));
              }
          }
          (LogicalType::Timestamp { .. }, PhysicalType::INT64) => {}
          (LogicalType::Integer { bit_width, .. }, PhysicalType::INT32)
              if *bit_width <= 32 => {}
          (LogicalType::Integer { bit_width, .. }, PhysicalType::INT64)
              if *bit_width == 64 => {}
          // Null type
          (LogicalType::Unknown, PhysicalType::INT32) => {}
          (LogicalType::String, PhysicalType::BYTE_ARRAY) => {}
          (LogicalType::Json, PhysicalType::BYTE_ARRAY) => {}
          (LogicalType::Bson, PhysicalType::BYTE_ARRAY) => {}
          (LogicalType::Uuid, PhysicalType::FIXED_LEN_BYTE_ARRAY) if self.length == 16 => {}
          (LogicalType::Uuid, PhysicalType::FIXED_LEN_BYTE_ARRAY) => {
              return Err(general_err!(
                  "UUID cannot annotate field '{}' because it is not a FIXED_LEN_BYTE_ARRAY(16) field",
                  self.name
              ))
          }
          (LogicalType::Float16, PhysicalType::FIXED_LEN_BYTE_ARRAY)
              if self.length == 2 => {}
          (LogicalType::Float16, PhysicalType::FIXED_LEN_BYTE_ARRAY) => {
              return Err(general_err!(
                  "FLOAT16 cannot annotate field '{}' because it is not a FIXED_LEN_BYTE_ARRAY(2) field",
                  self.name
              ))
          }
          (a, b) => {
              return Err(general_err!(
                  "Cannot annotate {:?} from {} for field '{}'",
                  a,
                  b,
                  self.name
              ))
          }
      }
-     }
-     None => {}
  }

match self.converted_type {