"The second data argument, does not decode to a context" when mintin with Mesh on Cardano - smartcontracts

This is my script for minting an NFT:
{-# INLINABLE mkPolicy #-}
mkPolicy :: BuiltinData -> PlutusV2.ScriptContext -> Bool
mkPolicy _ ctx = traceIfFalse "wrong amount minted" checkNFTAmount
  where
    info :: PlutusV2.TxInfo
    info = PlutusV2.scriptContextTxInfo ctx

    -- hasUTxO :: Bool
    -- hasUTxO = any (\i -> PlutusV2.txInInfoOutRef i == mpTxOutRef r) $ PlutusV2.txInfoInputs info

    checkNFTAmount :: Bool
    checkNFTAmount = case Value.flattenValue (PlutusV2.txInfoMint info) of
      [(cs, tn', amt)] -> cs == ownCurrencySymbol ctx && tn' == PlutusV2.TokenName "" && amt == 1
      _                -> False

{-
  As a Minting Policy
-}
compiledCode :: PlutusTx.CompiledCode (BuiltinData -> BuiltinData -> ())
compiledCode = $$(PlutusTx.compile [|| wrap ||])
  where
    wrap = Scripts.mkUntypedMintingPolicy mkPolicy

policy :: Scripts.MintingPolicy
policy = PlutusV2.mkMintingPolicyScript compiledCode

script :: PlutusV2.Script
script = PlutusV2.unMintingPolicyScript policy

{-
  As a Short Byte String
-}
scriptSBS :: SBS.ShortByteString
scriptSBS = SBS.toShort . LBS.toStrict $ serialise script

{-
  As a Serialised Script
-}
serialisedScript :: PlutusScript PlutusScriptV2
serialisedScript = PlutusScriptSerialised scriptSBS

writeSerialisedScript :: IO ()
writeSerialisedScript = void $ writeFileTextEnvelope "nft-mint-V2.plutus" Nothing serialisedScript
I'm using Mesh to mint the NFT with that script:
const walletAddr = wallet.getPaymentAddress();
const addressUtxo: UTxO[] = await provider.fetchAddressUTxOs(walletAddr);

const redeemer: Partial<Action> = {
  tag: "MINT",
  data: {
    alternative: 0,
    fields: [],
  },
};

const assetMetadata: AssetMetadata = {
  name: "MyNFT",
  image: "https://picsum.photos/200",
  mediaType: "image/jpg",
  description: "This NFT is minted by me.",
};

const asset: Mint = {
  assetName: "MyNFT",
  assetQuantity: "1",
  metadata: assetMetadata,
  label: "721",
  recipient: walletAddr,
};

// Mint NFT
const tx = new Transaction({ initiator: wallet });
tx.mintAsset(script, asset, redeemer);
tx.setCollateral([addressUtxo[0]]);

const unsignedTx = await tx.build();
const signedTx = await wallet.signTx(unsignedTx, true);

try {
  const txHash = await wallet.submitTx(signedTx);
  console.log(txHash);
} catch (e) {
  console.log(e);
}
Unfortunately, it returned with this error:
transaction submit error ShelleyTxValidationError ShelleyBasedEraBabbage
(ApplyTxError
  [UtxowFailure
    (UtxoFailure
      (FromAlonzoUtxoFail
        (UtxosFailure
          (ValidationTagMismatch
            (IsValid True)
            (FailedUnexpectedly
              (PlutusFailure "
The 2 arg plutus script (PlutusScript PlutusV2 ScriptHash "77f807bc9403ef0177cc2a9956bfd5628ee649680041ccf48a198fc0") fails.
CekError An error has occurred:
User error:
The machine terminated because of an error, either from a built-in function or from an explicit use of 'error'.
The protocol version is: ProtVer {pvMajor = 7, pvMinor = 0}
The redeemer is: Constr 0 []
The second data argument, does not decode to a context
Has anyone faced this error before? The error says "The second data argument, does not decode to a context", and I do take the context as a parameter in my script as shown above, so I think the problem is with the context param. Or is something else wrong with my script?
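For reference, the `script` variable in the Mesh snippet above isn't shown. This error is typically raised when the node hands the policy a ScriptContext encoding it cannot decode, which often comes down to a language-version mismatch: a PlutusV2 policy attached to the transaction as a V1 script receives a V1 context and fails exactly this way. A minimal, hedged sketch of the script argument, assuming Mesh's PlutusScript shape and that the code value comes from the cborHex field of the generated nft-mint-V2.plutus envelope (the value below is a hypothetical placeholder; adjust the import to the Mesh package in use):

// Sketch: pass the policy to Mesh as a PlutusScript with an explicit
// language version, so the node evaluates it against a V2 ScriptContext.
import type { PlutusScript } from "@meshsdk/core"; // assumed package name

const script: PlutusScript = {
  code: "59...", // hypothetical placeholder: cborHex from nft-mint-V2.plutus
  version: "V2", // must match the PlutusV2 ScriptContext the policy expects
};

If the version is left as "V1" here, or a raw CBOR string is passed where Mesh expects this object, the context decode failure above is the expected symptom, so this is worth ruling out first.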

Related

Some contract functions run on the Hardhat network while others don't

I got the following error while testing my contract:
$ npx hardhat test

  NftyplayLicense
    buyLicense
done!
      1) should buy the license successfully

  0 passing (2s)
  1 failing

  1) NftyplayLicense
       buyLicense
         should buy the license successfully:
     Error: invalid address or ENS name (argument="name", value=undefined, code=INVALID_ARGUMENT, version=contracts/5.7.0)
      at Logger.makeError (node_modules/@ethersproject/logger/src.ts/index.ts:269:28)
      at Logger.throwError (node_modules/@ethersproject/logger/src.ts/index.ts:281:20)
      at Logger.throwArgumentError (node_modules/@ethersproject/logger/src.ts/index.ts:285:21)
      at /home/bhavesh/Documents/boilerplate/node_modules/@ethersproject/contracts/src.ts/index.ts:123:16
      at step (node_modules/@ethersproject/contracts/lib/index.js:48:23)
      at Object.next (node_modules/@ethersproject/contracts/lib/index.js:29:53)
      at fulfilled (node_modules/@ethersproject/contracts/lib/index.js:20:58)
      at processTicksAndRejections (node:internal/process/task_queues:95:5)
The following is the test that I have written:
const { assert, expect } = require("chai")
const { network, ethers, deployments } = require("hardhat")
const { developmentChains } = require("../helper-hardhat-config")

!developmentChains.includes(network.name)
    ? describe.skip()
    : describe("NftyplayLicense", function () {
          let nftyplayLicenseContract, nftyPlayLicense, deployer, player, param

          beforeEach(async function () {
              const accounts = await ethers.getSigners()
              deployer = accounts[0]
              player = accounts[1]
              await deployments.fixture(["all"])
              nftyplayLicenseContract = await ethers.getContract("NftyPlayLicensing")
              nftyPlayLicense = nftyplayLicenseContract.connect(deployer)
              param = {
                  name: "NftyPlayLicensing",
                  category: "1",
                  durationInDays: "1",
                  licenseType: "1",
                  nftAddress: "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC",
                  nonce: 735,
                  price: "1000000000000000000",
                  r: "0xb07df588e0674bc28050a58a7f2cfd315f7e0aec136d0ebcf89fdcd9fa8aa928",
                  s: "0x109a8b6ff70709d2fecdba8b385097f2f979738518e4c444d4cf95b7e2c9621b",
                  signer: "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
                  tokenId: "1",
                  v: 27,
              }
          })

          describe("buyLicense", function () {
              it("should buy the license successfully", async function () {
                  nftyPlayLicense.whitelist("0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC")
                  console.log("done!")
                  nftyPlayLicense = nftyplayLicenseContract.connect(player)
                  await expect(
                      nftyPlayLicense.buyLicense(param, "https://uri.com", {
                          value: ethers.utils.parseEther("1"),
                      })
                  ).to.emit("NftyPlayLicensing", "LicenseBought")
                  const result = await nftyPlayLicense.getExecutedOrder(735)
                  assert.equal(result, true)
              })
          })
      })
Note that
nftyPlayLicense.whitelist("0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC")
is running fine, but not in the case of
nftyPlayLicense.buyLicense(param, "https://uri.com", {
value: ethers.utils.parseEther("1"),
})
I've made the order and signature object with MetaMask and stored it locally; then I'm calling the buyLicense function on the contract using that object as the params.
This is my contract function:
function buyLicense(OrderTypes.LicenseOrder calldata order, string memory uri) public payable {
    require(isOrderExecuted[order.nonce] == false, "Order is already Executed");
    require(isCancelled[order.signer][order.nonce] == false, "Listing is Cancelled");
    bytes32 orderHash = SignatureVerifier.hashMakerOrder(order, ORDER_TYPE_HASH);
    validateMakerOrder(order, orderHash);
    uint256 licenseTokenId = mintLicenseToken(msg.sender, uri, order);
    isOrderExecuted[order.nonce] = true;
    emit LicenseBought(
        licenseTokenId,
        order.price,
        order.category,
        order.tokenId,
        order.signer,
        order.licenseType,
        order.nftContract
    );
}
But I am unable to run that function.
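A hedged reading of that failure: ethers v5 throws invalid address or ENS name (argument="name", value=undefined) when an address-typed value resolves to undefined, and the contract reads order.nftContract while the test's param object only defines nftAddress, so that struct component would encode as undefined. A sketch of the call with the key renamed (assuming the LicenseOrder struct field is indeed nftContract, as the emit suggests), and with the emit matcher handed the contract instance, which is what the waffle matchers expect, rather than a name string:

// Sketch: rename the struct key to match the Solidity field, and pass the
// contract object (not its name) to the emit matcher.
param.nftContract = "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC" // was nftAddress
await expect(
    nftyPlayLicense.buyLicense(param, "https://uri.com", {
        value: ethers.utils.parseEther("1"),
    })
).to.emit(nftyplayLicenseContract, "LicenseBought")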

How do you create an actix-web HttpServer with session-based authentication?

I'm working on an internal API with which approved users can read from and insert into a database. My intention is for this program to run on our local network, and for multiple users or applications to be able to access it.
In its current state it functions as long as the user is running a local instance of the client and does all their work under localhost. However, when the same user attempts to log in from their IP address, the session is not stored and nothing can be accessed. The result is the same when attempting to connect from another computer on the network to a computer running the client.
Before commenting or answering, please be aware that this is my first time implementing authentication. Any mistakes or egregious errors on my part are simply out of ignorance.
My Cargo.toml file includes the following dependencies:
actix-session = { version = "0.7.1", features = ["cookie-session"] }
actix-web = "^4"
argon2 = "0.4.1"
rand_core = "0.6.3"
reqwest = "0.11.11"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"
sqlx = { version = "0.6.1", features = ["runtime-actix-rustls", "mysql", "macros"] }
Here are the contents of main.rs:
use actix_session::storage::CookieSessionStore;
use actix_session::SessionMiddleware;
use actix_web::cookie::Key;
use actix_web::web::{get, post, Data, Path};
use actix_web::{HttpResponse, Responder};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    let secret_key = Key::generate();

    // Load or create a new config file.
    // let settings = ...

    // Create a connection to the database.
    let pool = sqlx::mysql::MySqlPoolOptions::new()
        .connect(&format!(
            "mysql://{}:{}@{}:{}/mydb",
            env!("DB_USER"),
            env!("DB_PASS"),
            env!("DB_HOST"),
            env!("DB_PORT"),
        ))
        .await
        .unwrap();

    println!(
        "Application listening on {}:{}",
        settings.host, settings.port,
    );

    // Instantiate the application and add routes for each handler.
    actix_web::HttpServer::new(move || {
        let logger = actix_web::middleware::Logger::default();
        actix_web::App::new()
            .wrap(SessionMiddleware::new(
                CookieSessionStore::default(),
                secret_key.clone(),
            ))
            .wrap(logger)
            .app_data(Data::new(pool.clone()))
            /*
                Routes that return all rows from a database table.
            */
            /*
                Routes that return a webpage.
            */
            .route("/new", get().to(new))
            .route("/login", get().to(login))
            .route("/register", get().to(register))
            /*
                Routes that deal with authentication.
            */
            .route("/register", post().to(register_user))
            .route("/login", post().to(login_user))
            .route("/logout", get().to(logout_user))
            /*
                Routes that handle POST requests.
            */
    })
    .bind(format!("{}:{}", settings.host, settings.port))?
    .run()
    .await
}
The code involving authentication is as follows:
use crate::model::User;
use actix_session::Session;
use actix_web::web::{Data, Form};
use actix_web::{error::ErrorUnauthorized, HttpResponse};
use argon2::password_hash::{rand_core::OsRng, PasswordHasher, SaltString};
use argon2::{Argon2, PasswordHash, PasswordVerifier};
use sqlx::{MySql, Pool};

#[derive(serde::Serialize)]
pub struct SessionDetails {
    user_id: u32,
}

#[derive(Debug, sqlx::FromRow)]
pub struct AuthorizedUser {
    pub id: u32,
    pub username: String,
    pub password_hash: String,
    pub approved: bool,
}

pub fn check_auth(session: &Session) -> Result<u32, actix_web::Error> {
    match session.get::<u32>("user_id").unwrap() {
        Some(user_id) => Ok(user_id),
        None => Err(ErrorUnauthorized("User not logged in.")),
    }
}

pub async fn register_user(
    data: Form<User>,
    pool: Data<Pool<MySql>>,
) -> Result<String, Box<dyn std::error::Error>> {
    let data = data.into_inner();

    let salt = SaltString::generate(&mut OsRng);
    let argon2 = Argon2::default();
    let password_hash = argon2
        .hash_password(data.password.as_bytes(), &salt)
        .unwrap()
        .to_string();
    // Use to verify.
    // let parsed_hash = PasswordHash::new(&hash).unwrap();

    const INSERT_QUERY: &str =
        "INSERT INTO users (username, password_hash) VALUES (?, ?) RETURNING id;";

    let fetch_one: Result<(u32,), sqlx::Error> = sqlx::query_as(INSERT_QUERY)
        .bind(data.username)
        .bind(password_hash)
        .fetch_one(&mut pool.acquire().await.unwrap())
        .await;

    match fetch_one {
        Ok((user_id,)) => Ok(user_id.to_string()),
        Err(err) => Err(Box::new(err)),
    }
}

pub async fn login_user(
    session: Session,
    data: Form<User>,
    pool: Data<Pool<MySql>>,
) -> Result<HttpResponse, Box<dyn std::error::Error>> {
    let data = data.into_inner();

    let fetched_user: AuthorizedUser = match sqlx::query_as(
        "SELECT id, username, password_hash, approved FROM users WHERE username = ?;",
    )
    .bind(data.username)
    .fetch_one(&mut pool.acquire().await?)
    .await
    {
        Ok(fetched_user) => fetched_user,
        Err(e) => return Ok(HttpResponse::NotFound().body(format!("{e:?}"))),
    };

    let parsed_hash = PasswordHash::new(&fetched_user.password_hash).unwrap();

    match Argon2::default().verify_password(&data.password.as_bytes(), &parsed_hash) {
        Ok(_) => {
            if !fetched_user.approved {
                return Ok(
                    HttpResponse::Unauthorized().body("This account has not yet been approved.")
                );
            }
            session.insert("user_id", &fetched_user.id)?;
            session.renew();
            Ok(HttpResponse::Ok().json(SessionDetails {
                user_id: fetched_user.id,
            }))
        }
        Err(_) => Ok(HttpResponse::Unauthorized().body("Incorrect password.")),
    }
}

pub async fn logout_user(session: Session) -> HttpResponse {
    if check_auth(&session).is_err() {
        return HttpResponse::NotFound().body("No user logged in.");
    }
    session.purge();
    HttpResponse::SeeOther()
        .append_header(("Location", "/login"))
        .body(format!("User logged out successfully."))
}
I've set my client up to run with host 0.0.0.0 on port 80, but with the little networking knowledge I have that's the best I could think to do — I'm lost here. Any help would be greatly appreciated.
As it turns out, the cookie was not being transmitted because our local network is not using HTTPS: SessionMiddleware marks the session cookie as Secure by default, so browsers refuse to send it over plain HTTP.
Changing this...
.wrap(SessionMiddleware::new(
    CookieSessionStore::default(),
    secret_key.clone(),
))
to the following...
.wrap(
    SessionMiddleware::builder(CookieSessionStore::default(), secret_key.clone())
        .cookie_secure(false)
        .build(),
)
solves the issue.
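One caveat on this fix: cookie_secure(false) means the session cookie now travels in cleartext. That is a reasonable trade-off on a trusted internal network like the one described above, but if the API is ever exposed more widely, terminate TLS in front of it and restore the secure default.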

Cloudflare ESI worker / TypeError: Body has already been used

I'm trying to use a Cloudflare Worker to manage my backend ESI fragments, but I get an error:
Uncaught (in promise) TypeError: Body has already been used. It can only be used once. Use tee() first if you need to read it twice.
Uncaught (in response) TypeError: Body has already been used. It can only be used once. Use tee() first if you need to read it twice.
I can't find where the body has already been used.
The process is:
1. Get a response containing the ESI parts
2. Transform the body by replacing the ESI fragments with sub-backend calls (the streamTransformBody function)
3. Return the response
addEventListener("fetch", event => {
event.respondWith(handleRequest(event.request))
});
const esiHeaders = {
"user-agent": "cloudflare"
}
async function handleRequest(request) {
// get cookies from the request
if(cookie = request.headers.get("Cookie")) {
esiHeaders["Cookie"] = cookie
console.log(cookie)
}
// Clone the request so that it's no longer immutable
newRequest = new Request(request)
// remove cookie from request
newRequest.headers.delete('Cookie')
// Add header to get <esi>
newRequest.headers.set("Surrogate-Capability", "abc=ESI/1.0")
console.log(newRequest.url);
const response = await fetch(newRequest);
let contentType = response.headers.get('content-type')
if (!contentType || !contentType.startsWith("text/")) {
return response
}
// Clone the response so that it's no longer immutable
const newResponse = new Response(response.body, response);
let { readable, writable } = new TransformStream()
streamTransformBody(newResponse.body, writable)
newResponse.headers.append('x-workers-hello', 'Hello from
Cloudflare Workers');
return newResponse;
}
async function streamTransformBody(readable, writable) {
    const startTag = "<".charCodeAt(0);
    const endTag = ">".charCodeAt(0);
    let reader = readable.getReader();
    let writer = writable.getWriter();
    let templateChunks = null;

    while (true) {
        let { done, value } = await reader.read();
        if (done) break;
        while (value.byteLength > 0) {
            if (templateChunks) {
                let end = value.indexOf(endTag);
                if (end === -1) {
                    templateChunks.push(value);
                    break;
                } else {
                    templateChunks.push(value.subarray(0, end));
                    await writer.write(await translate(templateChunks));
                    templateChunks = null;
                    value = value.subarray(end + 1);
                }
            }
            let start = value.indexOf(startTag);
            if (start === -1) {
                await writer.write(value);
                break;
            } else {
                await writer.write(value.subarray(0, start));
                value = value.subarray(start + 1);
                templateChunks = [];
            }
        }
    }
    await writer.close();
}
async function translate(chunks) {
    const decoder = new TextDecoder();
    let templateKey = chunks.reduce(
        (accumulator, chunk) =>
            accumulator + decoder.decode(chunk, { stream: true }),
        ""
    );
    templateKey += decoder.decode();
    return handleTemplate(new TextEncoder(), templateKey);
}

async function handleTemplate(encoder, templateKey) {
    const linkRegex = /(esi:include.*src="(.*?)".*\/)/gm
    let result = linkRegex.exec(templateKey);
    let esi
    if (!result) {
        return encoder.encode(`<${templateKey}>`);
    }
    if (result[2]) {
        esi = await subRequests(result[2]);
    }
    return encoder.encode(
        `${esi}`
    );
}

async function subRequests(target) {
    target = esiHost + target
    const init = {
        method: 'GET',
        headers: esiHeaders
    }
    let response = await fetch(target, init)
    if (!response.ok) {
        return ''
    }
    let text = await response.text()
    return '<!--esi-->' + text + '<!--/esi-->'
}
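A hedged sketch of where the double read likely happens: newResponse is constructed around response.body, and that same stream is then handed to streamTransformBody(), so by the time the runtime serialises newResponse its body has already been consumed. Building the outgoing response from the readable half of the TransformStream instead keeps every stream single-use. Replacing the tail of handleRequest along these lines may fix it:

// Sketch: respond with the transformed stream rather than the upstream
// body that streamTransformBody() is reading.
let { readable, writable } = new TransformStream()
streamTransformBody(response.body, writable)

const newResponse = new Response(readable, response) // copies status and headers
newResponse.headers.append('x-workers-hello', 'Hello from Cloudflare Workers')
return newResponse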

How to call Azure function from Kotlin

I currently have deployed an Azure function used to get an AD token.
Function:
https://getadtokennet.azurewebsites.net/api/getadtokennet
Request header:
x-functions-key = {key}
How can I call this function from my Kotlin app?
This is the way I call it from JavaScript:
function getTokenAzure(onsuccess, onerror) {
    var tokenUrl = 'https://getadtokennet.azurewebsites.net/api/getadtokennet';
    $.ajax(tokenUrl, {
        method: 'GET',
        beforeSend: function (request) {
            request.setRequestHeader("x-functions-key", "function key");
        },
        success: function (data) {
            onsuccess(data);
            console.log('token: ' + data.token);
        },
        error: function (xhr, status, error) {
            var failureMessage = "GetToken error: " + status + " - " + error;
            onerror(failureMessage);
            console.log(failureMessage);
        }
    });
}
1. In IntelliJ IDEA, select Create New Project.
2. In the New Project window, select Maven from the left pane.
3. Select the Create from archetype check box, and then select Add Archetype for the azure-functions-kotlin-archetype.
4. In the Add Archetype window, complete the fields as follows:
   - GroupId: com.microsoft.azure
   - ArtifactId: azure-functions-kotlin-archetype
   - Version: Use the latest version from the central repository
5. Select OK, and then select Next.
6. Enter your details for the current project, and select Finish.
For complete information, refer to the links below, which contain the same information:
Kotlin Function and Running Kotlin in Azure Functions
I found the way; here it is:
import java.io.BufferedReader
import java.io.IOException
import java.io.InputStreamReader
import java.net.HttpURLConnection
import java.net.HttpURLConnection.HTTP_OK
import java.net.URL

fun getToken(): String {
    val tokenUrl = URL("https://getadtokennet.azurewebsites.net/api/getadtokennet")
    val connection = tokenUrl.openConnection() as HttpURLConnection
    connection.requestMethod = "POST"
    connection.setRequestProperty("x-functions-key", "function key")
    connection.doOutput = true

    val responseCode = connection.responseCode
    if (responseCode == HTTP_OK) {
        val readerIn = BufferedReader(InputStreamReader(connection.inputStream))
        val response = StringBuffer()
        // Append every line of the response body
        var inputLine = readerIn.readLine()
        while (inputLine != null) {
            response.append(inputLine)
            inputLine = readerIn.readLine()
        }
        readerIn.close()
        // Return token
        return response.toString()
    } else {
        // Error is a simple data class holding a code and a message
        val responseError = Error(code = "BadRequest", message = "There was an error getting the token.")
        throw IOException(responseError.toString())
    }
}
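One difference from the JavaScript version worth noting: the jQuery call uses GET, while this Kotlin code sends a POST with doOutput = true and writes no body. If the function app restricts the allowed HTTP methods, the requestMethod here may need to match the one the function actually accepts.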

How do I iterate over a Vec of functions returning Futures in Rust?

Is it possible to loop over a Vec, calling a method that returns a Future on each, and build a chain of Futures, to be evaluated (eventually) by the consumer? Whether to execute the later Futures would depend on the outcome of the earlier Futures in the Vec.
To clarify:
I'm working on an application that can fetch data from an arbitrary set of upstream sources.
Requesting data would check with each of the sources, in turn. If the first source had an error (Err), or did not have the data available (None), then the second source would be tried, and so on.
Each source should be tried exactly once, and no source should be tried until all of the sources before have returned their results. Errors are logged, but otherwise ignored, passing the query to the next upstream data source.
I have some working code that does this for fetching metadata:
/// Attempts to read/write data to various external sources. These are
/// nested types, because a data source may exist as both a reader and a writer
struct StoreManager {
    /// Upstream data sources
    readers: Vec<Rc<RefCell<StoreRead>>>,
    /// Downstream data sinks
    writers: Vec<Rc<RefCell<StoreWrite>>>,
}

impl StoreRead for StoreManager {
    fn metadata(self: &Self, id: &Identifier) -> Box<Future<Option<Metadata>, Error>> {
        Box::new(ok(self.readers
            .iter()
            .map(|store| {
                executor::block_on(store.borrow().metadata(id)).unwrap_or_else(|err| {
                    error!("Error on metadata(): {:?}", err);
                    None
                })
            })
            .find(Option::is_some)
            .unwrap_or(None)))
    }
}
Aside from my unhappiness with all of the Box and Rc/RefCell nonsense, my real concern is with the executor::block_on() call. It blocks, waiting for each Future to return a result, before continuing to the next.
Given that it's possible to call fn_returning_future().or_else(|_| other_fn()) and so on, is it possible to build up a dynamic chain like this? Or is it a requirement to fully evaluate each Future in the iterator before moving to the next?
You can use stream::unfold to convert a single value into a stream. In this case, we can use the IntoIter iterator as that single value.
use futures::{executor, stream, Stream, TryStreamExt}; // 0.3.4

type Error = Box<dyn std::error::Error>;
type Result<T, E = Error> = std::result::Result<T, E>;

async fn network_request(val: i32) -> Result<i32> {
    // Just for demonstration, don't do this in a real program
    use std::{
        thread,
        time::{Duration, Instant},
    };
    thread::sleep(Duration::from_secs(1));

    println!("Resolving {} at {:?}", val, Instant::now());
    Ok(val * 100)
}

fn requests_in_sequence(vals: Vec<i32>) -> impl Stream<Item = Result<i32>> {
    stream::unfold(vals.into_iter(), |mut vals| async {
        let val = vals.next()?;
        let response = network_request(val).await;
        Some((response, vals))
    })
}

fn main() {
    let s = requests_in_sequence(vec![1, 2, 3]);
    executor::block_on(async {
        s.try_for_each(|v| async move {
            println!("-> {}", v);
            Ok(())
        })
        .await
        .expect("An error occurred");
    });
}
Resolving 1 at Instant { tv_sec: 6223328, tv_nsec: 294631597 }
-> 100
Resolving 2 at Instant { tv_sec: 6223329, tv_nsec: 310839993 }
-> 200
Resolving 3 at Instant { tv_sec: 6223330, tv_nsec: 311005834 }
-> 300
To ignore Err and None, you have to shuttle the Error over to the Item, making the Item type a Result<Option<T>, Error>:
use futures::{executor, stream, Stream, StreamExt}; // 0.3.4

type Error = Box<dyn std::error::Error>;
type Result<T, E = Error> = std::result::Result<T, E>;

async fn network_request(val: i32) -> Result<Option<i32>> {
    // Just for demonstration, don't do this in a real program
    use std::{
        thread,
        time::{Duration, Instant},
    };
    thread::sleep(Duration::from_secs(1));

    println!("Resolving {} at {:?}", val, Instant::now());

    match val {
        1 => Err("boom".into()),  // An error
        2 => Ok(None),            // No data
        _ => Ok(Some(val * 100)), // Success
    }
}

fn requests_in_sequence(vals: Vec<i32>) -> impl Stream<Item = Result<Option<i32>>> {
    stream::unfold(vals.into_iter(), |mut vals| async {
        let val = vals.next()?;
        let response = network_request(val).await;
        Some((response, vals))
    })
}

fn main() {
    executor::block_on(async {
        let s = requests_in_sequence(vec![1, 2, 3]);
        let s = s.filter_map(|v| async move { v.ok() });
        let s = s.filter_map(|v| async move { v });
        let mut s = s.boxed_local();

        match s.next().await {
            Some(v) => println!("First success: {}", v),
            None => println!("No successful requests"),
        }
    });
}
Resolving 1 at Instant { tv_sec: 6224229, tv_nsec: 727216392 }
Resolving 2 at Instant { tv_sec: 6224230, tv_nsec: 727404752 }
Resolving 3 at Instant { tv_sec: 6224231, tv_nsec: 727593740 }
First success: 300
is it possible to build up a dynamic chain like this
Yes, by leveraging async functions:
use futures::executor; // 0.3.4

type Error = Box<dyn std::error::Error>;
type Result<T, E = Error> = std::result::Result<T, E>;

async fn network_request(val: i32) -> Result<Option<i32>> {
    // Just for demonstration, don't do this in a real program
    use std::{
        thread,
        time::{Duration, Instant},
    };
    thread::sleep(Duration::from_secs(1));

    println!("Resolving {} at {:?}", val, Instant::now());

    match val {
        1 => Err("boom".into()),  // An error
        2 => Ok(None),            // No data
        _ => Ok(Some(val * 100)), // Success
    }
}

async fn requests_in_sequence(vals: Vec<i32>) -> Result<i32> {
    let mut vals = vals.into_iter().peekable();

    while let Some(v) = vals.next() {
        match network_request(v).await {
            Ok(Some(v)) => return Ok(v),
            Err(e) if vals.peek().is_none() => return Err(e),
            Ok(None) | Err(_) => { /* Do nothing and try the next source */ }
        }
    }

    Err("Ran out of sources".into())
}

fn main() {
    executor::block_on(async {
        match requests_in_sequence(vec![1, 2, 3]).await {
            Ok(v) => println!("First success: {}", v),
            Err(e) => println!("No successful requests: {}", e),
        }
    });
}
See also:
Creating Diesel.rs queries with a dynamic number of .and()'s
is it a requirement to fully evaluate each Future in the iterator before moving to the next
Isn't that part of your own requirements? Emphasis mine:
Requesting data would check with each of the sources, in turn. If the first source had an error (Err), or did not have the data available (None), then the second source would be tried