chore: fix clippy lints from nightly-2025-03-16 #11273

Open · wants to merge 4 commits into base: main

17 changes: 9 additions & 8 deletions compute_tools/src/catalog.rs
@@ -98,13 +98,15 @@ pub async fn get_database_schema(
         .kill_on_drop(true)
         .spawn()?;
 
-    let stdout = cmd.stdout.take().ok_or_else(|| {
-        std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stdout.")
-    })?;
+    let stdout = cmd
+        .stdout
+        .take()
+        .ok_or_else(|| std::io::Error::other("Failed to capture stdout."))?;
 
-    let stderr = cmd.stderr.take().ok_or_else(|| {
-        std::io::Error::new(std::io::ErrorKind::Other, "Failed to capture stderr.")
-    })?;
+    let stderr = cmd
+        .stderr
+        .take()
+        .ok_or_else(|| std::io::Error::other("Failed to capture stderr."))?;
 
     let mut stdout_reader = FramedRead::new(stdout, BytesCodec::new());
     let stderr_reader = BufReader::new(stderr);
@@ -128,8 +130,7 @@ pub async fn get_database_schema(
         }
     });
 
-    return Err(SchemaDumpError::IO(std::io::Error::new(
-        std::io::ErrorKind::Other,
+    return Err(SchemaDumpError::IO(std::io::Error::other(
         "failed to start pg_dump",
     )));
 }

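Most hunks in this PR are the same mechanical change: `io::Error::other(e)`, stable since Rust 1.74, replaces `io::Error::new(io::ErrorKind::Other, e)`, which recent clippy nightlies flag (presumably via the `io_other_error` lint). A minimal sketch of the equivalence:

    use std::io;

    fn main() {
        // Old spelling, now linted:
        let a = io::Error::new(io::ErrorKind::Other, "pg_dump failed");
        // New shorthand; produces the same ErrorKind::Other:
        let b = io::Error::other("pg_dump failed");
        assert_eq!(a.kind(), b.kind());
    }
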
2 changes: 1 addition & 1 deletion compute_tools/src/spec_apply.rs
@@ -420,7 +420,7 @@ impl ComputeNode {
             .iter()
             .filter_map(|val| val.parse::<usize>().ok())
             .map(|val| if val > 1 { val - 1 } else { 1 })
-            .last()
+            .next_back()
             .unwrap_or(3)
     }
 }

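For a `DoubleEndedIterator`, `.last()` walks the iterator from the front while `.next_back()` takes the final element directly, and the two agree wherever both are available; the rename is what clippy suggests here (the `double_ended_iterator_last` lint, name assumed from the nightly in the title). A small sketch:

    fn main() {
        let vals = [10usize, 20, 30];
        // Same result; next_back() avoids walking the whole slice:
        assert_eq!(vals.iter().next_back(), Some(&30));
        assert_eq!(vals.iter().last(), Some(&30));
    }
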
2 changes: 1 addition & 1 deletion control_plane/storcon_cli/src/main.rs
@@ -935,7 +935,7 @@ async fn main() -> anyhow::Result<()> {
     let mut node_to_fill_descs = Vec::new();
 
     for desc in node_descs {
-        let to_drain = nodes.iter().any(|id| *id == desc.id);
+        let to_drain = nodes.contains(&desc.id);
         if to_drain {
             node_to_drain_descs.push(desc);
         } else {

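`contains` states the membership test directly instead of spelling it out with `iter().any(..)`; clippy rewrites one into the other (the `manual_contains` lint, again an assumption about the exact name). Sketch:

    fn main() {
        let nodes: Vec<u64> = vec![1, 2, 3];
        let id: u64 = 2;
        assert_eq!(nodes.iter().any(|n| *n == id), nodes.contains(&id));
    }
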
7 changes: 3 additions & 4 deletions libs/postgres_backend/src/lib.rs
@@ -5,7 +5,6 @@
 #![deny(unsafe_code)]
 #![deny(clippy::undocumented_unsafe_blocks)]
 use std::future::Future;
-use std::io::ErrorKind;
 use std::net::SocketAddr;
 use std::os::fd::{AsRawFd, RawFd};
 use std::pin::Pin;
@@ -227,7 +226,7 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> MaybeWriteOnly<IO> {
         match self {
             MaybeWriteOnly::Full(framed) => framed.read_startup_message().await,
             MaybeWriteOnly::WriteOnly(_) => {
-                Err(io::Error::new(ErrorKind::Other, "reading from write only half").into())
+                Err(io::Error::other("reading from write only half").into())
             }
             MaybeWriteOnly::Broken => panic!("IO on invalid MaybeWriteOnly"),
         }
@@ -237,7 +236,7 @@
         match self {
             MaybeWriteOnly::Full(framed) => framed.read_message().await,
             MaybeWriteOnly::WriteOnly(_) => {
-                Err(io::Error::new(ErrorKind::Other, "reading from write only half").into())
+                Err(io::Error::other("reading from write only half").into())
             }
             MaybeWriteOnly::Broken => panic!("IO on invalid MaybeWriteOnly"),
         }
@@ -975,7 +974,7 @@ impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for CopyDataWriter<'_, IO> {
             .write_message_noflush(&BeMessage::CopyData(buf))
             // write_message only writes to the buffer, so it can fail iff the
             // message is invaid, but CopyData can't be invalid.
-            .map_err(|_| io::Error::new(ErrorKind::Other, "failed to serialize CopyData"))?;
+            .map_err(|_| io::Error::other("failed to serialize CopyData"))?;
 
         Poll::Ready(Ok(buf.len()))
     }

4 changes: 2 additions & 2 deletions libs/postgres_backend/tests/simple_select.rs
@@ -85,8 +85,8 @@ static KEY: Lazy<rustls::pki_types::PrivateKeyDer<'static>> = Lazy::new(|| {
 
 static CERT: Lazy<rustls::pki_types::CertificateDer<'static>> = Lazy::new(|| {
     let mut cursor = Cursor::new(include_bytes!("cert.pem"));
-    let cert = rustls_pemfile::certs(&mut cursor).next().unwrap().unwrap();
-    cert
+
+    rustls_pemfile::certs(&mut cursor).next().unwrap().unwrap()
 });
 
 // test that basic select with ssl works

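This one is the long-standing `clippy::let_and_return` pattern: binding a value only to return it on the next line. A hypothetical reduction:

    fn sum_old() -> i32 {
        let x = 1 + 2;
        x // let_and_return fires on this binding
    }

    fn sum_new() -> i32 {
        1 + 2
    }

    fn main() {
        assert_eq!(sum_old(), sum_new());
    }
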
2 changes: 1 addition & 1 deletion libs/pq_proto/src/framed.rs
@@ -35,7 +35,7 @@ impl ConnectionError {
     pub fn into_io_error(self) -> io::Error {
         match self {
             ConnectionError::Io(io) => io,
-            ConnectionError::Protocol(pe) => io::Error::new(io::ErrorKind::Other, pe.to_string()),
+            ConnectionError::Protocol(pe) => io::Error::other(pe.to_string()),
         }
     }
 }

2 changes: 1 addition & 1 deletion libs/pq_proto/src/lib.rs
@@ -257,7 +257,7 @@ pub enum ProtocolError {
 impl ProtocolError {
     /// Proxy stream.rs uses only io::Error; provide it.
     pub fn into_io_error(self) -> io::Error {
-        io::Error::new(io::ErrorKind::Other, self.to_string())
+        io::Error::other(self.to_string())
     }
 }

9 changes: 3 additions & 6 deletions libs/proxy/postgres-protocol2/src/authentication/sasl.rs
@@ -212,7 +212,7 @@ impl ScramSha256 {
                 password,
                 channel_binding,
             } => (nonce, password, channel_binding),
-            _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")),
+            _ => return Err(io::Error::other("invalid SCRAM state")),
         };
 
         let message =
@@ -291,7 +291,7 @@
                 server_key,
                 auth_message,
             } => (server_key, auth_message),
-            _ => return Err(io::Error::new(io::ErrorKind::Other, "invalid SCRAM state")),
+            _ => return Err(io::Error::other("invalid SCRAM state")),
         };
 
         let message =
@@ -301,10 +301,7 @@
 
         let verifier = match parsed {
             ServerFinalMessage::Error(e) => {
-                return Err(io::Error::new(
-                    io::ErrorKind::Other,
-                    format!("SCRAM error: {}", e),
-                ));
+                return Err(io::Error::other(format!("SCRAM error: {}", e)));
             }
             ServerFinalMessage::Verifier(verifier) => verifier,
         };

5 changes: 2 additions & 3 deletions libs/remote_storage/src/azure_blob.rs
@@ -801,8 +801,7 @@ where
             // that support needs to be hacked in.
             //
             // including {self:?} into the message would be useful, but unsure how to unproject.
-            _ => std::task::Poll::Ready(Err(std::io::Error::new(
-                std::io::ErrorKind::Other,
+            _ => std::task::Poll::Ready(Err(std::io::Error::other(
                 "cloned or initial values cannot be read",
             ))),
         }
@@ -855,7 +854,7 @@ where
     };
     Err(azure_core::error::Error::new(
         azure_core::error::ErrorKind::Io,
-        std::io::Error::new(std::io::ErrorKind::Other, msg),
+        std::io::Error::other(msg),
     ))
 }

2 changes: 1 addition & 1 deletion libs/remote_storage/tests/test_real_s3.rs
@@ -558,7 +558,7 @@ async fn upload_large_enough_file(
 ) -> usize {
     let header = bytes::Bytes::from_static("remote blob data content".as_bytes());
     let body = bytes::Bytes::from(vec![0u8; 1024]);
-    let contents = std::iter::once(header).chain(std::iter::repeat(body).take(128));
+    let contents = std::iter::once(header).chain(std::iter::repeat_n(body, 128));
 
     let len = contents.clone().fold(0, |acc, next| acc + next.len());

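`std::iter::repeat_n(value, n)`, stable since Rust 1.82, replaces `repeat(value).take(n)`; besides reading better, `repeat_n` can move the value on its final iteration rather than always cloning. Sketch of the equivalence:

    use std::iter;

    fn main() {
        let a: Vec<_> = iter::repeat("chunk").take(3).collect();
        let b: Vec<_> = iter::repeat_n("chunk", 3).collect();
        assert_eq!(a, b);
    }
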
9 changes: 3 additions & 6 deletions libs/utils/src/crashsafe.rs
@@ -81,12 +81,9 @@ pub fn path_with_suffix_extension(
 }
 
 pub fn fsync_file_and_parent(file_path: &Utf8Path) -> io::Result<()> {
-    let parent = file_path.parent().ok_or_else(|| {
-        io::Error::new(
-            io::ErrorKind::Other,
-            format!("File {file_path:?} has no parent"),
-        )
-    })?;
+    let parent = file_path
+        .parent()
+        .ok_or_else(|| io::Error::other(format!("File {file_path:?} has no parent")))?;
 
     fsync(file_path)?;
     fsync(parent)?;

12 changes: 6 additions & 6 deletions pageserver/src/http/routes.rs
@@ -3330,11 +3330,11 @@ async fn put_tenant_timeline_import_basebackup(
 
     let broker_client = state.broker_client.clone();
 
-    let mut body = StreamReader::new(request.into_body().map(|res| {
-        res.map_err(|error| {
-            std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
-        })
-    }));
+    let mut body = StreamReader::new(
+        request
+            .into_body()
+            .map(|res| res.map_err(|error| std::io::Error::other(anyhow::anyhow!(error)))),
+    );
 
     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
 
@@ -3407,7 +3407,7 @@ async fn put_tenant_timeline_import_wal(
 
     let mut body = StreamReader::new(request.into_body().map(|res| {
         res.map_err(|error| {
-            std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
+            std::io::Error::other(anyhow::anyhow!(error))
         })
     }));

3 changes: 1 addition & 2 deletions pageserver/src/task_mgr.rs
@@ -218,8 +218,7 @@ pageserver_runtime!(MGMT_REQUEST_RUNTIME, "mgmt request worker");
 pageserver_runtime!(WALRECEIVER_RUNTIME, "walreceiver worker");
 pageserver_runtime!(BACKGROUND_RUNTIME, "background op worker");
 // Bump this number when adding a new pageserver_runtime!
-// SAFETY: it's obviously correct
-const NUM_MULTIPLE_RUNTIMES: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(4) };
+const NUM_MULTIPLE_RUNTIMES: NonZeroUsize = NonZeroUsize::new(4).unwrap();
 
 #[derive(Debug, Clone, Copy)]
 pub struct PageserverTaskId(u64);

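`Option::unwrap` is usable in const context as of Rust 1.83, so the checked `NonZeroUsize::new(..).unwrap()` now works at compile time: a zero value becomes a const-eval error instead of undefined behavior behind `new_unchecked`. Sketch:

    use std::num::NonZeroUsize;

    // Fails to compile (const-eval panic) if the literal is ever set to 0:
    const NUM_RUNTIMES: NonZeroUsize = NonZeroUsize::new(4).unwrap();

    fn main() {
        assert_eq!(NUM_RUNTIMES.get(), 4);
    }
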
9 changes: 4 additions & 5 deletions pageserver/src/tenant.rs
@@ -912,6 +912,7 @@ enum StartCreatingTimelineResult {
     Idempotent(Arc<Timeline>),
 }
 
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 enum TimelineInitAndSyncResult {
     ReadyToActivate(Arc<Timeline>),
     NeedsSpawnImportPgdata(TimelineInitAndSyncNeedsSpawnImportPgdata),
@@ -998,6 +999,7 @@ enum CreateTimelineCause {
     Delete,
 }
 
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 enum LoadTimelineCause {
     Attach,
     Unoffload,
@@ -3678,7 +3680,7 @@
                     }
                 }
             }
-            TenantState::Active { .. } => {
+            TenantState::Active => {
                 return Ok(());
             }
             TenantState::Broken { reason, .. } => {
@@ -4383,10 +4385,7 @@ impl Tenant {
             .to_string();
 
         fail::fail_point!("tenant-config-before-write", |_| {
-            Err(std::io::Error::new(
-                std::io::ErrorKind::Other,
-                "tenant-config-before-write",
-            ))
+            Err(std::io::Error::other("tenant-config-before-write"))
         });
 
         // Convert the config to a toml file.

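The `reason = ".."` field on lint attributes was stabilized with `lint_reasons` in Rust 1.81; it attaches a justification that shows up in diagnostics if the lint is later escalated. A hypothetical enum showing why `large_enum_variant` fires and what the allow defers:

    #[allow(clippy::large_enum_variant, reason = "TODO: box the large variant")]
    enum Outcome {
        Small(u8),
        // Every Outcome is sized for this inline payload, which is
        // exactly what clippy::large_enum_variant warns about.
        Large([u8; 1024]),
    }

    fn main() {
        let o = Outcome::Small(1);
        let _ = match o {
            Outcome::Small(v) => v as usize,
            Outcome::Large(buf) => buf.len(),
        };
    }
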
7 changes: 2 additions & 5 deletions pageserver/src/tenant/blob_io.rs
@@ -15,7 +15,7 @@
 //! len >= 128: 1CCCXXXX XXXXXXXX XXXXXXXX XXXXXXXX
 //!
 use std::cmp::min;
-use std::io::{Error, ErrorKind};
+use std::io::Error;
 
 use async_compression::Level;
 use bytes::{BufMut, BytesMut};
@@ -331,10 +331,7 @@ impl<const BUFFERED: bool> BlobWriter<BUFFERED> {
             return (
                 (
                     io_buf.slice_len(),
-                    Err(Error::new(
-                        ErrorKind::Other,
-                        format!("blob too large ({len} bytes)"),
-                    )),
+                    Err(Error::other(format!("blob too large ({len} bytes)"))),
                 ),
                 srcbuf,
             );

8 changes: 2 additions & 6 deletions pageserver/src/tenant/block_io.rs
@@ -216,12 +216,8 @@ impl<'a> FileBlockReader<'a> {
         match cache
             .read_immutable_buf(self.file_id, blknum, ctx)
             .await
-            .map_err(|e| {
-                std::io::Error::new(
-                    std::io::ErrorKind::Other,
-                    format!("Failed to read immutable buf: {e:#}"),
-                )
-            })? {
+            .map_err(|e| std::io::Error::other(format!("Failed to read immutable buf: {e:#}")))?
+        {
             ReadBufResult::Found(guard) => Ok(guard.into()),
             ReadBufResult::NotFound(write_guard) => {
                 // Read the page from disk into the buffer

11 changes: 4 additions & 7 deletions pageserver/src/tenant/ephemeral_file.rs
@@ -149,13 +149,10 @@ impl EphemeralFile {
         let pos = self.bytes_written;
 
         let new_bytes_written = pos.checked_add(srcbuf.len().into_u64()).ok_or_else(|| {
-            std::io::Error::new(
-                std::io::ErrorKind::Other,
-                format!(
-                    "write would grow EphemeralFile beyond u64::MAX: len={pos} writen={srcbuf_len}",
-                    srcbuf_len = srcbuf.len(),
-                ),
-            )
+            std::io::Error::other(format!(
+                "write would grow EphemeralFile beyond u64::MAX: len={pos} writen={srcbuf_len}",
+                srcbuf_len = srcbuf.len(),
+            ))
         })?;
 
         // Write the payload

2 changes: 1 addition & 1 deletion pageserver/src/tenant/layer_map/layer_coverage.rs
@@ -53,7 +53,7 @@ impl<Value: Clone> LayerCoverage<Value> {
     ///
     /// Complexity: O(log N)
     fn add_node(&mut self, key: i128) {
-        let value = match self.nodes.range(..=key).last() {
+        let value = match self.nodes.range(..=key).next_back() {
             Some((_, Some(v))) => Some(v.clone()),
             Some((_, None)) => None,
             None => None,

2 changes: 1 addition & 1 deletion pageserver/src/tenant/mgr.rs
@@ -58,7 +58,7 @@ use crate::{InitializationOrder, TEMP_FILE_SUFFIX};
 
 /// For a tenant that appears in TenantsMap, it may either be
 /// - `Attached`: has a full Tenant object, is elegible to service
-///     reads and ingest WAL.
+///   reads and ingest WAL.
 /// - `Secondary`: is only keeping a local cache warm.
 ///
 /// Secondary is a totally distinct state rather than being a mode of a `Tenant`, because

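The two doc-comment hunks in this PR change only indentation: recent clippy (apparently the `doc_overindented_list_items` lint) wants a wrapped list item's continuation lines aligned with the item text, two spaces after the dash, not deeper. Roughly:

    // Over-indented continuation, flagged:
    // /// - `Attached`: is eligible to service
    // ///     reads and ingest WAL.
    //
    // Aligned continuation, accepted:
    /// - `Attached`: is eligible to service
    ///   reads and ingest WAL.
    pub struct TenantSlotDocs;
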
2 changes: 1 addition & 1 deletion pageserver/src/tenant/remote_timeline_client/index.rs
@@ -126,7 +126,7 @@ impl IndexPart {
     /// Version history
     /// - 2: added `deleted_at`
     /// - 3: no longer deserialize `timeline_layers` (serialized format is the same, but timeline_layers
-    ///     is always generated from the keys of `layer_metadata`)
+    ///   is always generated from the keys of `layer_metadata`)
     /// - 4: timeline_layers is fully removed.
     /// - 5: lineage was added
     /// - 6: last_aux_file_policy is added.

@@ -366,7 +366,7 @@ impl SplitDeltaLayerWriter {
         )
         .await?;
         let (start_key, prev_delta_writer) =
-            std::mem::replace(&mut self.inner, Some((key, next_delta_writer))).unwrap();
+            self.inner.replace((key, next_delta_writer)).unwrap();
         self.batches.add_unfinished_delta_writer(
             prev_delta_writer,
             start_key..key,

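`Option::replace(new)` is the one-call spelling of `std::mem::replace(&mut opt, Some(new))`: it stores the new value and returns whatever was there before. Sketch:

    fn main() {
        let mut slot = Some((1, "old"));
        let prev = slot.replace((2, "new"));
        assert_eq!(prev, Some((1, "old")));
        assert_eq!(slot, Some((2, "new")));
    }
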
@@ -766,7 +766,7 @@ mod tests {
                 rand::Rng::fill(&mut rand::thread_rng(), &mut dst_slice[len..]); // to discover bugs
                 Ok((dst, len))
             }
-            Err(e) => Err(std::io::Error::new(std::io::ErrorKind::Other, e)),
+            Err(e) => Err(std::io::Error::other(e)),
         }
     }
 }

1 change: 1 addition & 0 deletions pageserver/src/tenant/storage_layer/merge_iterator.rs
@@ -59,6 +59,7 @@ impl LayerIterRef<'_> {
 /// 1. Unified iterator for image and delta layers.
 /// 2. `Ord` for use in [`MergeIterator::heap`] (for the k-merge).
 /// 3. Lazy creation of the real delta/image iterator.
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 pub(crate) enum IteratorWrapper<'a> {
     NotLoaded {
         ctx: &'a RequestContext,

3 changes: 2 additions & 1 deletion pageserver/src/tenant/timeline.rs
@@ -1025,6 +1025,7 @@ pub(crate) enum ShutdownMode {
     Hard,
 }
 
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 enum ImageLayerCreationOutcome {
     /// We generated an image layer
     Generated {
@@ -2226,7 +2227,7 @@ impl Timeline {
                 .await
                 .expect("holding a reference to self");
             }
-            TimelineState::Active { .. } => {
+            TimelineState::Active => {
                 return Ok(());
             }
             TimelineState::Broken { .. } | TimelineState::Stopping => {

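`TimelineState::Active` and `TenantState::Active` are evidently field-less, so the `{ .. }` in their patterns was redundant; a recent nightly lint flags this (possibly `unneeded_struct_pattern`; the name is an assumption). Minimal repro:

    enum State {
        Active,
        Broken { reason: String },
    }

    fn is_ready(s: &State) -> bool {
        match s {
            State::Active => true,         // was: State::Active { .. }
            State::Broken { .. } => false, // `{ .. }` still needed: real fields
        }
    }

    fn main() {
        assert!(is_ready(&State::Active));
        assert!(!is_ready(&State::Broken { reason: "epoch".into() }));
    }
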
1 change: 1 addition & 0 deletions pageserver/src/tenant/upload_queue.rs
@@ -302,6 +302,7 @@ pub struct UploadQueueStoppedDeletable {
     pub(super) deleted_at: SetDeletedFlagProgress,
 }
 
+#[allow(clippy::large_enum_variant, reason = "TODO")]
 pub enum UploadQueueStopped {
     Deletable(UploadQueueStoppedDeletable),
     Uninitialized,