chore: update tokio to 0.3

Signed-off-by: ljedrz <ljedrz@gmail.com>
This commit is contained in:
ljedrz
2020-11-17 13:23:42 +01:00
committed by Joonas Koivunen
parent c0657bf739
commit 7e51079010
28 changed files with 152 additions and 130 deletions

78
Cargo.lock generated
View File

@ -299,6 +299,12 @@ version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38"
[[package]]
name = "bytes"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0dcbc35f504eb6fc275a6d20e4ebcda18cf50d40ba6fabff8c711fa16cb3b16"
[[package]]
name = "cast"
version = "0.2.3"
@ -682,7 +688,7 @@ version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cc2157cca35ae62d3d6281d876cc1b3f0f71a3409fe4b7c8d8790b441e9bc2b"
dependencies = [
"bytes",
"bytes 0.5.6",
"rand",
"smallvec",
]
@ -693,7 +699,7 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "836b2fbda90bc12569223077c1fa0a62a8eee8b78f4e5d82a8f20b20ca1ccf0f"
dependencies = [
"bytes",
"bytes 0.5.6",
"domain",
"futures",
"rand",
@ -915,7 +921,7 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b"
dependencies = [
"bytes",
"bytes 0.5.6",
"futures",
"memchr",
"pin-project 0.4.26",
@ -975,7 +981,7 @@ version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "993f9e0baeed60001cf565546b0d3dbe6a6ad23f2bd31644a133c641eccf6d53"
dependencies = [
"bytes",
"bytes 0.5.6",
"fnv",
"futures-core",
"futures-sink",
@ -1008,7 +1014,7 @@ checksum = "ed18eb2459bf1a09ad2d6b1547840c3e5e62882fa09b9a6a20b1de8e3228848f"
dependencies = [
"base64",
"bitflags",
"bytes",
"bytes 0.5.6",
"headers-core",
"http",
"mime",
@ -1076,7 +1082,7 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9"
dependencies = [
"bytes",
"bytes 0.5.6",
"fnv",
"itoa",
]
@ -1087,7 +1093,7 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b"
dependencies = [
"bytes",
"bytes 0.5.6",
"http",
]
@ -1115,7 +1121,7 @@ version = "0.13.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f3afcfae8af5ad0576a31e768415edb627824129e8e5a29b8bfccb2f234e835"
dependencies = [
"bytes",
"bytes 0.5.6",
"futures-channel",
"futures-core",
"futures-util",
@ -1214,7 +1220,7 @@ dependencies = [
"async-trait",
"base64",
"byteorder",
"bytes",
"bytes 0.5.6",
"cid",
"dirs",
"domain",
@ -1239,7 +1245,7 @@ dependencies = [
"sled",
"tempfile",
"thiserror",
"tokio 0.2.22",
"tokio 0.3.6",
"tracing",
"tracing-futures",
"tracing-subscriber",
@ -1259,7 +1265,7 @@ dependencies = [
"prost",
"prost-build",
"thiserror",
"tokio 0.2.22",
"tokio 0.3.6",
"tracing",
"unsigned-varint 0.3.3",
]
@ -1270,7 +1276,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"async-stream",
"bytes",
"bytes 0.5.6",
"cid",
"futures",
"hex-literal",
@ -1291,7 +1297,7 @@ dependencies = [
"tar",
"tempfile",
"thiserror",
"tokio 0.2.22",
"tokio 0.3.6",
"tracing",
"tracing-subscriber",
"url",
@ -1390,7 +1396,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3c2b4c99f8798be90746fc226acf95d3e6cff0655883634cc30dab1f64f438b"
dependencies = [
"atomic",
"bytes",
"bytes 0.5.6",
"futures",
"lazy_static",
"libp2p-core",
@ -1510,7 +1516,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78a2653b2e3254a3bbeb66bfc3f0dca7d6cba6aa2a96791db114003dec1b5394"
dependencies = [
"arrayvec",
"bytes",
"bytes 0.5.6",
"either",
"fnv",
"futures",
@ -1558,7 +1564,7 @@ version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed764eab613a8fb6b7dcf6c796f55a06fef2270e528329903e25cd3311b99663"
dependencies = [
"bytes",
"bytes 0.5.6",
"futures",
"futures_codec",
"libp2p-core",
@ -1576,7 +1582,7 @@ version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb441fb015ec16690099c5d910fcba271d357763b3dcb784db7b27bbb0b68372"
dependencies = [
"bytes",
"bytes 0.5.6",
"curve25519-dalek",
"futures",
"lazy_static",
@ -1808,7 +1814,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea338ffba87fdaaea364b314301ead2ee38e584a7a7b06e881ca101a2a059d0f"
dependencies = [
"anyhow",
"bytes",
"bytes 0.5.6",
"futures",
"http",
"httparse",
@ -1856,7 +1862,7 @@ version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93faf2e41f9ee62fb01680ed48f3cc26652352327aa2e59869070358f6b7dd75"
dependencies = [
"bytes",
"bytes 0.5.6",
"futures",
"log",
"pin-project 1.0.4",
@ -2182,7 +2188,7 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212"
dependencies = [
"bytes",
"bytes 0.5.6",
"prost-derive",
]
@ -2192,7 +2198,7 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02b10678c913ecbd69350e8535c3aef91a8676c0773fc1d7b95cdd196d7f2f26"
dependencies = [
"bytes",
"bytes 0.5.6",
"heck",
"itertools 0.8.2",
"log",
@ -2223,7 +2229,7 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1834f67c0697c001304b75be76f67add9c89742eda3a085ad8ee0bb38c3417aa"
dependencies = [
"bytes",
"bytes 0.5.6",
"prost",
]
@ -2827,17 +2833,16 @@ version = "0.2.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d34ca54d84bf2b5b4d7d31e901a8464f7b60ac145a284fba25ceb801f2ddccd"
dependencies = [
"bytes",
"bytes 0.5.6",
"fnv",
"futures-core",
"iovec",
"lazy_static",
"memchr",
"mio 0.6.22",
"num_cpus",
"pin-project-lite 0.1.10",
"slab",
"tokio-macros",
"tokio-macros 0.2.5",
]
[[package]]
@ -2847,9 +2852,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "720ba21c25078711bf456d607987d95bce90f7c3bea5abe1db587862e7a1e87c"
dependencies = [
"autocfg",
"bytes 0.6.0",
"futures-core",
"libc",
"memchr",
"mio 0.7.7",
"num_cpus",
"pin-project-lite 0.2.4",
"slab",
"tokio-macros 0.3.2",
]
[[package]]
@ -2863,13 +2874,24 @@ dependencies = [
"syn",
]
[[package]]
name = "tokio-macros"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46dfffa59fc3c8aad216ed61bdc2c263d2b9d87a9c8ac9de0c11a813e51b6db7"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tokio-util"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499"
dependencies = [
"bytes",
"bytes 0.5.6",
"futures-core",
"futures-sink",
"log",
@ -3050,7 +3072,7 @@ version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35"
dependencies = [
"bytes",
"bytes 0.5.6",
"futures_codec",
]
@ -3132,7 +3154,7 @@ version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f41be6df54c97904af01aa23e613d4521eed7ab23537cede692d4058f6449407"
dependencies = [
"bytes",
"bytes 0.5.6",
"futures",
"headers",
"http",

View File

@ -40,7 +40,7 @@ rand = { default-features = false, version = "0.7" }
serde = { default-features = false, features = ["derive"], version = "1.0" }
serde_json = { default-features = false, features = ["std"], version = "1.0" }
thiserror = { default-features = false, version = "1.0" }
tokio = { default-features = false, features = ["fs", "rt-threaded", "stream", "sync", "blocking"], version = "0.2" }
tokio = { default-features = false, features = ["fs", "macros", "rt-multi-thread", "stream", "sync"], version = "0.3" }
tracing = { default-features = false, features = ["log"], version = "0.1" }
tracing-futures = { default-features = false, features = ["std", "futures-03"], version = "0.2" }
void = { default-features = false, version = "1.0" }
@ -59,7 +59,7 @@ prost-build = { default-features = false, version = "0.6" }
[dev-dependencies]
hex-literal = { default-features = false, version = "0.3" }
sha2 = { default-features = false, version = "0.9" }
tokio = { default-features = false, features = ["io-std"], version = "0.2" }
tokio = { default-features = false, features = ["io-std", "io-util"], version = "0.3" }
tracing-subscriber = { default-features = false, features = ["fmt", "tracing-log", "ansi", "env-filter"], version = "0.2" }
[workspace]

View File

@ -19,6 +19,6 @@ libp2p-swarm = { default-features = false, version = "0.24" }
multihash = { default-features = false, version = "0.11" }
prost = { default-features = false, version = "0.6" }
thiserror = { default-features = false, version = "1.0" }
tokio = { default-features = false, version = "0.2" }
tokio = { default-features = false, version = "0.3" }
tracing = { default-features = false, version = "0.1" }
unsigned-varint = { default-features = false, version = "0.3" }

View File

@ -31,7 +31,7 @@ serde_json = { default-features = false, version = "1.0" }
structopt = { default-features = false, version = "0.3" }
tar = { default-features = false, version = "0.4" }
thiserror = { default-features = false, version = "1.0" }
tokio = { default-features = false, version = "0.2" }
tokio = { default-features = false, features = ["time"], version = "0.3" }
tracing = { default-features = false, features = ["log"], version = "0.1" }
tracing-subscriber = { default-features = false, features = ["fmt", "tracing-log", "env-filter"], version = "0.2" }
url = { default-features = false, version = "2.1" }

View File

@ -130,7 +130,7 @@ fn main() {
// TODO: sigterm should initiate graceful shutdown, second time should shutdown right now
// NOTE: sigkill ... well surely it will stop the process right away
let mut rt = tokio::runtime::Runtime::new().expect("Failed to create event loop");
let rt = tokio::runtime::Runtime::new().expect("Failed to create event loop");
rt.block_on(async move {
let opts = IpfsOptions {

View File

@ -155,7 +155,7 @@ pub fn routes<T: IpfsTypes>(
}
pub(crate) async fn handle_shutdown(
mut tx: tokio::sync::mpsc::Sender<()>,
tx: tokio::sync::mpsc::Sender<()>,
) -> Result<impl warp::Reply, std::convert::Infallible> {
Ok(match tx.send(()).await {
Ok(_) => warp::http::StatusCode::OK,
@ -185,7 +185,7 @@ mod tests {
routes(&ipfs, shutdown_tx)
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn not_found_as_plaintext() {
let routes = testing_routes().await;
let resp = warp::test::request()
@ -199,7 +199,7 @@ mod tests {
assert_eq!(resp.body(), "404 page not found");
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn invalid_peer_id_as_messageresponse() {
let routes = testing_routes().await;
let resp = warp::test::request()

View File

@ -536,7 +536,7 @@ mod tests {
})
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn url_hacked_args() {
let response = request()
.path("/pubsub/pub?arg=some_channel&arg=foobar")
@ -546,7 +546,7 @@ mod tests {
assert_eq!(body, r#"{"message":"foobar","topic":"some_channel"}"#);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn message_in_body() {
let response = request()
.path("/pubsub/pub?arg=some_channel")

View File

@ -216,7 +216,7 @@ mod tests {
use std::collections::HashSet;
use std::convert::TryFrom;
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_inner_local() {
let filter = local(&*preloaded_testing_ipfs().await);
@ -263,7 +263,7 @@ mod tests {
assert!(diff.is_empty(), "{:?}", diff);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn refs_with_path() {
let ipfs = preloaded_testing_ipfs().await;

View File

@ -303,7 +303,7 @@ mod tests {
}
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn very_long_file_and_symlink_names() {
let ipfs = Node::new("test_node").await;
@ -359,7 +359,7 @@ mod tests {
assert_eq!(found, expected);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn get_multiblock_file() {
let ipfs = Node::new("test_node").await;

View File

@ -377,7 +377,7 @@ impl<D: fmt::Display> serde::Serialize for Quoted<D> {
mod tests {
use crate::v0::root_files::add;
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn add_single_block_file() {
let ipfs = tokio_ipfs().await;

View File

@ -5,7 +5,7 @@
use futures::future::{Either, Map};
use std::future::Future;
use std::time::Duration;
use tokio::time::{timeout, Elapsed, Timeout};
use tokio::time::{error::Elapsed, timeout, Timeout};
pub type MaybeTimeout<F> =
Either<Timeout<F>, Map<F, fn(<F as Future>::Output) -> Result<<F as Future>::Output, Elapsed>>>;

View File

@ -600,7 +600,7 @@ mod tests {
use super::*;
use crate::{make_ipld, Node};
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_resolve_root_cid() {
let Node { ipfs, .. } = Node::new("test_node").await;
let dag = IpldDag::new(ipfs);
@ -610,7 +610,7 @@ mod tests {
assert_eq!(res, data);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_resolve_array_elem() {
let Node { ipfs, .. } = Node::new("test_node").await;
let dag = IpldDag::new(ipfs);
@ -623,7 +623,7 @@ mod tests {
assert_eq!(res, make_ipld!(2));
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_resolve_nested_array_elem() {
let Node { ipfs, .. } = Node::new("test_node").await;
let dag = IpldDag::new(ipfs);
@ -636,7 +636,7 @@ mod tests {
assert_eq!(res, make_ipld!(2));
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_resolve_object_elem() {
let Node { ipfs, .. } = Node::new("test_node").await;
let dag = IpldDag::new(ipfs);
@ -651,7 +651,7 @@ mod tests {
assert_eq!(res, make_ipld!(false));
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_resolve_cid_elem() {
let Node { ipfs, .. } = Node::new("test_node").await;
let dag = IpldDag::new(ipfs);
@ -845,7 +845,7 @@ mod tests {
);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn resolve_through_link() {
let Node { ipfs, .. } = Node::new("test_node").await;
let dag = IpldDag::new(ipfs);
@ -874,7 +874,7 @@ mod tests {
}
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn fail_resolving_first_segment() {
let Node { ipfs, .. } = Node::new("test_node").await;
let dag = IpldDag::new(ipfs);
@ -890,7 +890,7 @@ mod tests {
assert_eq!(e.to_string(), format!("no link named \"1\" under {}", cid2));
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn fail_resolving_last_segment() {
let Node { ipfs, .. } = Node::new("test_node").await;
let dag = IpldDag::new(ipfs);
@ -906,7 +906,7 @@ mod tests {
assert_eq!(e.to_string(), format!("no link named \"a\" under {}", cid1));
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn fail_resolving_through_file() {
let Node { ipfs, .. } = Node::new("test_node").await;
@ -938,7 +938,7 @@ mod tests {
);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn fail_resolving_through_dir() {
let Node { ipfs, .. } = Node::new("test_node").await;

View File

@ -120,14 +120,14 @@ pub async fn resolve(domain: &str) -> Result<IpfsPath, Error> {
mod tests {
use super::*;
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[ignore]
async fn test_resolve1() {
let res = resolve("ipfs.io").await.unwrap().to_string();
assert_eq!(res, "/ipns/website.ipfs.io");
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[ignore]
async fn test_resolve2() {
let res = resolve("website.ipfs.io").await.unwrap().to_string();

View File

@ -1751,7 +1751,7 @@ mod tests {
use crate::make_ipld;
use multihash::Sha2_256;
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_put_and_get_block() {
let ipfs = Node::new("test_node").await;
@ -1764,7 +1764,7 @@ mod tests {
assert_eq!(block, new_block);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_put_and_get_dag() {
let ipfs = Node::new("test_node").await;
@ -1774,7 +1774,7 @@ mod tests {
assert_eq!(data, new_data);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_pin_and_unpin() {
let ipfs = Node::new("test_node").await;

View File

@ -267,7 +267,7 @@ mod tests {
use libp2p::{multiaddr::Protocol, multihash::Multihash, swarm::Swarm};
use std::convert::TryInto;
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn swarm_api() {
let (peer1_id, trans) = mk_transport();
let mut swarm1 = Swarm::new(trans, SwarmApi::default(), peer1_id.clone());

View File

@ -369,7 +369,7 @@ mod tests {
assert_eq!(links, ["african.txt", "americas.txt", "australian.txt",]);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn all_refs_from_root() {
let Node { ipfs, .. } = preloaded_testing_ipfs().await;
@ -416,7 +416,7 @@ mod tests {
assert_edges(&expected, all_edges.as_slice());
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn all_unique_refs_from_root() {
let Node { ipfs, .. } = preloaded_testing_ipfs().await;

View File

@ -56,7 +56,7 @@ macro_rules! pinstore_interface_tests {
use std::collections::HashMap;
use std::convert::TryFrom;
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn pin_direct_twice_is_good() {
let repo = DSTestContext::with($factory).await;
@ -84,7 +84,7 @@ macro_rules! pinstore_interface_tests {
);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn cannot_recursively_unpin_unpinned() {
let repo = DSTestContext::with($factory).await;
// root/nested/deeper: QmX5S2xLu32K6WxWnyLeChQFbDHy79ULV9feJYH2Hy9bgp
@ -103,7 +103,7 @@ macro_rules! pinstore_interface_tests {
assert_eq!(e.to_string(), "not pinned or pinned indirectly");
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn cannot_unpin_indirect() {
let repo = DSTestContext::with($factory).await;
// root/nested/deeper: QmX5S2xLu32K6WxWnyLeChQFbDHy79ULV9feJYH2Hy9bgp
@ -146,7 +146,7 @@ macro_rules! pinstore_interface_tests {
assert_eq!(e.to_string(), "not pinned or pinned indirectly");
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn can_pin_direct_as_recursive() {
// the other way around doesn't work
let repo = DSTestContext::with($factory).await;
@ -188,7 +188,7 @@ macro_rules! pinstore_interface_tests {
assert!(both.is_empty(), "{:?}", both);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn pin_recursive_pins_all_blocks() {
let repo = DSTestContext::with($factory).await;
@ -221,7 +221,7 @@ macro_rules! pinstore_interface_tests {
assert!(both.is_empty(), "{:?}", both);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn indirect_can_be_pinned_directly() {
let repo = DSTestContext::with($factory).await;
@ -261,7 +261,7 @@ macro_rules! pinstore_interface_tests {
assert!(both.is_empty(), "{:?}", both);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn direct_and_indirect_when_parent_unpinned() {
let repo = DSTestContext::with($factory).await;
@ -311,7 +311,7 @@ macro_rules! pinstore_interface_tests {
assert!(one.is_empty(), "{:?}", one);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn cannot_pin_recursively_pinned_directly() {
// this is a bit of odd as other ops are additive
let repo = DSTestContext::with($factory).await;

View File

@ -93,9 +93,9 @@ impl FsBlockStore {
match rx.recv().await {
Ok(Ok(())) => WriteCompletion::KnownGood,
Err(broadcast::RecvError::Closed) => WriteCompletion::NotObserved,
Err(broadcast::error::RecvError::Closed) => WriteCompletion::NotObserved,
Ok(Err(_)) => WriteCompletion::KnownBad,
Err(broadcast::RecvError::Lagged(_)) => {
Err(broadcast::error::RecvError::Lagged(_)) => {
unreachable!("sending at most one message to the channel with capacity of one")
}
}
@ -298,12 +298,12 @@ impl BlockStore for FsBlockStore {
trace!("synchronized with writer, write outcome: {:?}", message);
message
}
Err(broadcast::RecvError::Closed) => {
Err(broadcast::error::RecvError::Closed) => {
// there was never any write intention by any party, and we may have just
// closed the last sender above, or we were late for the one message.
Ok(())
}
Err(broadcast::RecvError::Lagged(_)) => {
Err(broadcast::error::RecvError::Lagged(_)) => {
unreachable!("broadcast channel should only be messaged once here")
}
};
@ -442,7 +442,7 @@ mod tests {
use std::env::temp_dir;
use std::sync::Arc;
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_fs_blockstore() {
let mut tmp = temp_dir();
tmp.push("blockstore1");
@ -480,7 +480,7 @@ mod tests {
std::fs::remove_dir_all(tmp).ok();
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_fs_blockstore_open() {
let mut tmp = temp_dir();
tmp.push("blockstore2");
@ -505,7 +505,7 @@ mod tests {
std::fs::remove_dir_all(&tmp).ok();
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_fs_blockstore_list() {
let mut tmp = temp_dir();
tmp.push("blockstore_list");
@ -529,7 +529,7 @@ mod tests {
}
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn race_to_insert_new() {
// FIXME: why not tempdir?
let mut tmp = temp_dir();
@ -560,7 +560,7 @@ mod tests {
assert_eq!(existing, count - 1);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn race_to_insert_with_existing() {
// FIXME: why not tempdir?
let mut tmp = temp_dir();
@ -633,7 +633,7 @@ mod tests {
(writes, existing)
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn remove() {
// FIXME: why not tempdir?
let mut tmp = temp_dir();

View File

@ -685,7 +685,7 @@ mod tests {
use multihash::Sha2_256;
use std::env::temp_dir;
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_mem_blockstore() {
let tmp = temp_dir();
let store = MemBlockStore::new(tmp);
@ -718,7 +718,7 @@ mod tests {
assert_eq!(get.await.unwrap(), None);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_mem_blockstore_list() {
let tmp = temp_dir();
let mem_store = MemBlockStore::new(tmp);
@ -741,7 +741,7 @@ mod tests {
}
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_mem_datastore() {
let tmp = temp_dir();
let store = MemDataStore::new(tmp);

View File

@ -429,7 +429,7 @@ mod tests {
}
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn subscription_basics() {
let registry = SubscriptionRegistry::<u32, ()>::default();
let s1 = registry.create_subscription(0.into(), None);
@ -441,7 +441,7 @@ mod tests {
assert_eq!(s3.await.unwrap(), 10);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn subscription_cancelled_on_dropping_registry() {
let registry = SubscriptionRegistry::<u32, ()>::default();
let s1 = registry.create_subscription(0.into(), None);
@ -449,7 +449,7 @@ mod tests {
assert_eq!(s1.await, Err(SubscriptionErr::Cancelled));
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn subscription_cancelled_on_shutdown() {
let registry = SubscriptionRegistry::<u32, ()>::default();
let s1 = registry.create_subscription(0.into(), None);
@ -457,7 +457,7 @@ mod tests {
assert_eq!(s1.await, Err(SubscriptionErr::Cancelled));
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn new_subscriptions_cancelled_after_shutdown() {
let registry = SubscriptionRegistry::<u32, ()>::default();
registry.shutdown();
@ -465,7 +465,7 @@ mod tests {
assert_eq!(s1.await, Err(SubscriptionErr::Cancelled));
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn dropping_subscription_future_after_registering() {
use std::time::Duration;
use tokio::time::timeout;
@ -488,12 +488,12 @@ mod tests {
// this test is designed to verify that the subscription registry is working properly
// and doesn't break even under extreme conditions
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[ignore]
async fn subscription_stress_test() {
use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng};
use std::time::Duration;
use tokio::{task, time::delay_for};
use tokio::{task, time::sleep};
// optional
tracing_subscriber::fmt::init();
@ -527,7 +527,7 @@ mod tests {
let (mut kind, mut count);
loop {
delay_for(Duration::from_millis(CREATE_WAIT_TIME)).await;
sleep(Duration::from_millis(CREATE_WAIT_TIME)).await;
// the id of the object that will gain subscriptions
kind = rng_clone.gen_range(0, KIND_COUNT);
@ -553,7 +553,7 @@ mod tests {
let (mut kinds, mut count);
loop {
delay_for(Duration::from_millis(FINISH_WAIT_TIME)).await;
sleep(Duration::from_millis(FINISH_WAIT_TIME)).await;
kinds = reg_clone
.subscriptions
@ -576,7 +576,7 @@ mod tests {
let (mut count, mut idx);
loop {
delay_for(Duration::from_millis(CANCEL_WAIT_TIME)).await;
sleep(Duration::from_millis(CANCEL_WAIT_TIME)).await;
let subs_unlocked = &mut *subs.lock().unwrap();
count = rng.gen_range(0, subs_unlocked.len());

View File

@ -8,7 +8,7 @@ mod common;
use common::{spawn_nodes, Topology};
// Ensure that the Bitswap object doesn't leak.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn check_bitswap_cleanups() {
// create a few nodes and connect the first one to others
let mut nodes = spawn_nodes(3, Topology::Star).await;
@ -19,7 +19,7 @@ async fn check_bitswap_cleanups() {
// last node says goodbye; check the number of bitswap peers
if let Some(node) = nodes.pop() {
node.shutdown().await;
time::delay_for(Duration::from_millis(200)).await;
time::sleep(Duration::from_millis(200)).await;
}
let bitswap_peers = nodes[0].get_bitswap_peers().await.unwrap();
@ -28,7 +28,7 @@ async fn check_bitswap_cleanups() {
// another node says goodbye; check the number of bitswap peers
if let Some(node) = nodes.pop() {
node.shutdown().await;
time::delay_for(Duration::from_millis(200)).await;
time::sleep(Duration::from_millis(200)).await;
}
let bitswap_peers = nodes[0].get_bitswap_peers().await.unwrap();
@ -41,7 +41,7 @@ async fn check_bitswap_cleanups() {
// testing the bitswap protocol (though it would be advised to uncomment
// the tracing_subscriber for stress-testing purposes)
#[ignore]
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn bitswap_stress_test() {
fn filter(i: usize) -> bool {
i % 2 == 0

View File

@ -15,7 +15,7 @@ fn create_block() -> Block {
}
// verify that a put block can be received via get_block and the data matches
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn two_node_put_get() {
let nodes = spawn_nodes(2, Topology::Line).await;
let block = create_block();
@ -30,7 +30,7 @@ async fn two_node_put_get() {
}
// check that a long line of nodes still works with get_block
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn long_get_block() {
// this number could be higher, but it starts hanging above ~24
const N: usize = 10;

View File

@ -68,7 +68,7 @@ mod tests {
const N: usize = 5;
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn check_topology_line() {
let nodes = spawn_nodes(N, Topology::Line).await;
@ -81,7 +81,7 @@ mod tests {
}
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn check_topology_ring() {
let nodes = spawn_nodes(N, Topology::Ring).await;
@ -90,7 +90,7 @@ mod tests {
}
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn check_topology_mesh() {
let nodes = spawn_nodes(N, Topology::Mesh).await;
@ -99,7 +99,7 @@ mod tests {
}
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn check_topology_star() {
let nodes = spawn_nodes(N, Topology::Star).await;

View File

@ -11,7 +11,7 @@ use common::interop::ForeignNode;
const TIMEOUT: Duration = Duration::from_secs(5);
// Make sure two instances of ipfs can be connected by `Multiaddr`.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn connect_two_nodes_by_addr() {
let node_a = Node::new("a").await;
@ -27,7 +27,7 @@ async fn connect_two_nodes_by_addr() {
}
// Make sure only a `Multiaddr` with `/p2p/` can be used to connect.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[should_panic(expected = "called `Result::unwrap()` on an `Err` value: MissingProtocolP2p")]
async fn dont_connect_without_p2p() {
let node_a = Node::new("a").await;
@ -45,7 +45,7 @@ async fn dont_connect_without_p2p() {
// Make sure two instances of ipfs can be connected by `PeerId`.
#[ignore = "connecting just by PeerId is not currently supported"]
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn connect_two_nodes_by_peer_id() {
let node_a = Node::new("a").await;
let node_b = Node::new("b").await;
@ -63,7 +63,7 @@ async fn connect_two_nodes_by_peer_id() {
}
// Ensure that duplicate connection attempts don't cause hangs.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn connect_duplicate_multiaddr() {
let node_a = Node::new("a").await;
let node_b = Node::new("b").await;
@ -79,7 +79,7 @@ async fn connect_duplicate_multiaddr() {
// More complicated one to the above; first node will have two listening addresses and the second
// one should dial both of the addresses, resulting in two connections.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn connect_two_nodes_with_two_connections_doesnt_panic() {
let node_a = Node::new("a").await;
let node_b = Node::new("b").await;

View File

@ -15,7 +15,7 @@ fn strip_peer_id(addr: Multiaddr) -> Multiaddr {
}
/// Check if `Ipfs::find_peer` works without DHT involvement.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn find_peer_local() {
let nodes = spawn_nodes(2, Topology::None).await;
nodes[0].connect(nodes[1].addrs[0].clone()).await.unwrap();
@ -101,7 +101,7 @@ async fn spawn_bootstrapped_nodes(n: usize) -> (Vec<Node>, Option<ForeignNode>)
}
/// Check if `Ipfs::find_peer` works using DHT.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn dht_find_peer() {
// works for numbers >=2, though 2 would essentially just
// be the same as find_peer_local, so it should be higher
@ -121,7 +121,7 @@ async fn dht_find_peer() {
assert_eq!(found_addrs, vec![to_be_found]);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn dht_get_closest_peers() {
const CHAIN_LEN: usize = 10;
let (nodes, _foreign_node) = spawn_bootstrapped_nodes(CHAIN_LEN).await;
@ -137,7 +137,7 @@ async fn dht_get_closest_peers() {
}
#[ignore = "targets an actual bootstrapper, so random failures can happen"]
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn dht_popular_content_discovery() {
let peer = Node::new("a").await;
@ -154,7 +154,7 @@ async fn dht_popular_content_discovery() {
}
/// Check if Ipfs::{get_providers, provide} does its job.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn dht_providing() {
const CHAIN_LEN: usize = 10;
let (nodes, foreign_node) = spawn_bootstrapped_nodes(CHAIN_LEN).await;
@ -183,7 +183,7 @@ async fn dht_providing() {
}
/// Check if Ipfs::{get, put} does its job.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn dht_get_put() {
const CHAIN_LEN: usize = 10;
let (nodes, foreign_node) = spawn_bootstrapped_nodes(CHAIN_LEN).await;

View File

@ -1,4 +1,4 @@
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn multiple_consecutive_ephemeral_listening_addresses() {
let node = ipfs::Node::new("test_node").await;
@ -12,7 +12,7 @@ async fn multiple_consecutive_ephemeral_listening_addresses() {
assert_ne!(first, second);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn multiple_concurrent_ephemeral_listening_addresses_on_same_ip() {
let node = ipfs::Node::new("test_node").await;
@ -41,7 +41,7 @@ async fn multiple_concurrent_ephemeral_listening_addresses_on_same_ip() {
);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[cfg(not(target_os = "macos"))]
async fn multiple_concurrent_ephemeral_listening_addresses_on_different_ip() {
let node = ipfs::Node::new("test_node").await;
@ -59,7 +59,7 @@ async fn multiple_concurrent_ephemeral_listening_addresses_on_different_ip() {
second.unwrap();
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn adding_unspecified_addr_resolves_with_first() {
let node = ipfs::Node::new("test_node").await;
// there is no test in trying to match this with others as ... that would be quite
@ -69,7 +69,7 @@ async fn adding_unspecified_addr_resolves_with_first() {
.unwrap();
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn listening_for_multiple_unspecified_addresses() {
let node = ipfs::Node::new("test_node").await;
// there is no test in trying to match this with others as ... that would be quite
@ -93,7 +93,7 @@ async fn listening_for_multiple_unspecified_addresses() {
);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn remove_listening_address() {
let node = ipfs::Node::new("test_node").await;
@ -117,7 +117,7 @@ fn remove_listening_address_before_completing() {
// "immediately".
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn pre_configured_listening_addrs() {
use ipfs::{IpfsOptions, MultiaddrWithPeerId, MultiaddrWithoutPeerId, Node};
use libp2p::Multiaddr;

View File

@ -7,14 +7,14 @@ use tokio::time::timeout;
mod common;
use common::{spawn_nodes, Topology};
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn subscribe_only_once() {
let a = Node::new("test_node").await;
let _stream = a.pubsub_subscribe("some_topic".into()).await.unwrap();
a.pubsub_subscribe("some_topic".into()).await.unwrap_err();
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn resubscribe_after_unsubscribe() {
let a = Node::new("test_node").await;
@ -26,7 +26,7 @@ async fn resubscribe_after_unsubscribe() {
drop(a.pubsub_subscribe("topic".into()).await.unwrap());
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn unsubscribe_via_drop() {
let a = Node::new("test_node").await;
@ -39,7 +39,7 @@ async fn unsubscribe_via_drop() {
assert_eq!(a.pubsub_subscribed().await.unwrap(), empty);
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn can_publish_without_subscribing() {
let a = Node::new("test_node").await;
a.pubsub_publish("topic".into(), b"foobar".to_vec())
@ -47,7 +47,7 @@ async fn can_publish_without_subscribing() {
.unwrap()
}
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[allow(clippy::mutable_key_type)] // clippy doesn't like Vec inside HashSet
async fn publish_between_two_nodes() {
use futures::stream::StreamExt;
@ -140,7 +140,7 @@ async fn publish_between_two_nodes() {
}
#[cfg(any(feature = "test_go_interop", feature = "test_js_interop"))]
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
#[ignore = "doesn't work yet"]
async fn pubsub_interop() {
use common::interop::{api_call, ForeignNode};

View File

@ -4,7 +4,7 @@ use futures::future::{AbortHandle, Abortable};
use ipfs::Node;
use tokio::{
task,
time::{delay_for, timeout},
time::{sleep, timeout},
};
use std::{
@ -33,7 +33,7 @@ where
if check(future().await) {
return Ok(());
}
delay_for(Duration::from_millis(100)).await;
sleep(Duration::from_millis(100)).await;
elapsed = started.elapsed();
}
}
@ -51,7 +51,7 @@ async fn check_cid_subscriptions(ipfs: &Node, cid: &Cid, expected_count: usize)
}
/// Check if canceling a Cid affects the wantlist.
#[tokio::test(max_threads = 1)]
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn wantlist_cancellation() {
tracing_subscriber::fmt::init();