diff --git a/substrate/core/basic-authorship/src/basic_authorship.rs b/substrate/core/basic-authorship/src/basic_authorship.rs index e5cdbeb7dc..aa46919224 100644 --- a/substrate/core/basic-authorship/src/basic_authorship.rs +++ b/substrate/core/basic-authorship/src/basic_authorship.rs @@ -16,8 +16,8 @@ //! A consensus proposer for "basic" chains which use the primitive inherent-data. -// FIXME: move this into substrate-consensus-common - https://github.com/paritytech/substrate/issues/1021 - +// FIXME #1021 move this into substrate-consensus-common +// use std::{sync::Arc, self}; use log::{info, trace}; @@ -197,7 +197,6 @@ impl Proposer where let pending_iterator = self.transaction_pool.ready(); for pending in pending_iterator { - // TODO [ToDr] Probably get rid of it, and validate in runtime. let encoded_size = pending.data.encode().len(); if pending_size + encoded_size >= MAX_TRANSACTIONS_SIZE { break } diff --git a/substrate/core/client/db/src/lib.rs b/substrate/core/client/db/src/lib.rs index 50fc3083db..a85d708392 100644 --- a/substrate/core/client/db/src/lib.rs +++ b/substrate/core/client/db/src/lib.rs @@ -307,7 +307,6 @@ where Block: BlockT, } fn reset_storage(&mut self, mut top: StorageMap, children: ChildrenStorageMap) -> Result { - // TODO: wipe out existing trie. 
if top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { return Err(client::error::ErrorKind::GenesisInvalid.into()); @@ -384,7 +383,7 @@ struct DbGenesisStorage(pub H256); impl DbGenesisStorage { pub fn new() -> Self { let mut root = H256::default(); - let mut mdb = MemoryDB::::default(); // TODO: use new() to make it more correct + let mut mdb = MemoryDB::::default(); state_machine::TrieDBMut::::new(&mut mdb, &mut root); DbGenesisStorage(root) } @@ -1024,7 +1023,7 @@ mod tests { fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { let mut changes_root = H256::default(); - let mut changes_trie_update = MemoryDB::::default(); // TODO: change to new() to make more correct + let mut changes_trie_update = MemoryDB::::default(); { let mut trie = TrieDBMut::::new( &mut changes_trie_update, diff --git a/substrate/core/client/db/src/light.rs b/substrate/core/client/db/src/light.rs index 13765bfe5d..4221edfa91 100644 --- a/substrate/core/client/db/src/light.rs +++ b/substrate/core/client/db/src/light.rs @@ -341,7 +341,6 @@ impl LightBlockchainStorage for LightStorage // update block number to hash lookup entries. for retracted in tree_route.retracted() { if retracted.hash == meta.finalized_hash { - // TODO: can we recover here? warn!("Safety failure: reverting finalized block {:?}", (&retracted.number, &retracted.hash)); } @@ -438,7 +437,6 @@ impl LightBlockchainStorage for LightStorage fn finalize_header(&self, id: BlockId) -> ClientResult<()> { if let Some(header) = self.header(id)? { let mut transaction = DBTransaction::new(); - // TODO: ensure best chain contains this block. 
let hash = header.hash(); let number = *header.number(); self.note_finalized(&mut transaction, &header, hash.clone())?; diff --git a/substrate/core/client/db/src/storage_cache.rs b/substrate/core/client/db/src/storage_cache.rs index 455c5a7736..da07aa4d5a 100644 --- a/substrate/core/client/db/src/storage_cache.rs +++ b/substrate/core/client/db/src/storage_cache.rs @@ -45,7 +45,7 @@ pub type SharedCache = Arc>>; /// Create new shared cache instance with given max memory usage. pub fn new_shared_cache(shared_cache_size: usize) -> SharedCache { - let cache_items = shared_cache_size / 100; // Estimated average item size. TODO: more accurate tracking + let cache_items = shared_cache_size / 100; // Guestimate, potentially inaccurate Arc::new(Mutex::new(Cache { storage: LruCache::new(cache_items), hashes: LruCache::new(cache_items), diff --git a/substrate/core/client/src/call_executor.rs b/substrate/core/client/src/call_executor.rs index 263c43596b..4c2ff946b5 100644 --- a/substrate/core/client/src/call_executor.rs +++ b/substrate/core/client/src/call_executor.rs @@ -205,7 +205,6 @@ where native_call: Option, ) -> Result, error::Error> where ExecutionManager: Clone { let state = self.backend.state_at(*at)?; - //TODO: Find a better way to prevent double block initialization if method != "Core_initialise_block" && initialised_block.map(|id| id != *at).unwrap_or(true) { let header = prepare_environment_block()?; state_machine::execute_using_consensus_failure_handler::< diff --git a/substrate/core/client/src/client.rs b/substrate/core/client/src/client.rs index 6921ecf312..e8c46156f7 100644 --- a/substrate/core/client/src/client.rs +++ b/substrate/core/client/src/client.rs @@ -130,7 +130,6 @@ pub trait BlockBody { } /// Client info -// TODO: split queue info from chain info and amalgamate into single struct. #[derive(Debug)] pub struct ClientInfo { /// Best block hash. @@ -325,7 +324,6 @@ impl Client where /// Get the RuntimeVersion at a given block. 
pub fn runtime_version_at(&self, id: &BlockId) -> error::Result { - // TODO: Post Poc-2 return an error if version is missing self.executor.runtime_version(id) } @@ -738,11 +736,9 @@ impl Client where self.apply_finality_with_block_hash(operation, parent_hash, None, last_best, make_notifications)?; } - // TODO: correct path logic for when to execute this function - // https://github.com/paritytech/substrate/issues/1232 + // FIXME #1232: correct path logic for when to execute this function let (storage_update,changes_update,storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, body.clone())?; - // TODO: non longest-chain rule. let is_new_best = finalized || match fork_choice { ForkChoiceStrategy::LongestChain => import_headers.post().number() > &last_best_number, ForkChoiceStrategy::Custom(v) => v, @@ -880,7 +876,7 @@ impl Client where // if the block is not a direct ancestor of the current best chain, // then some other block is the common ancestor. if route_from_best.common_block().hash != block { - // TODO: reorganize best block to be the best chain containing + // FIXME: #1442 reorganize best block to be the best chain containing // `block`. } @@ -1020,7 +1016,7 @@ impl Client where /// Get block status. pub fn block_status(&self, id: &BlockId) -> error::Result { - // TODO: more efficient implementation + // this can probably be implemented more efficiently if let BlockId::Hash(ref h) = id { if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { return Ok(BlockStatus::Queued); @@ -1073,10 +1069,9 @@ impl Client where /// If `maybe_max_block_number` is `Some(max_block_number)` /// the search is limited to block `numbers <= max_block_number`. /// in other words as if there were no blocks greater `max_block_number`. 
- /// - /// TODO [snd] possibly implement this on blockchain::Backend and just redirect here + /// TODO: we want to move this implementation to `blockchain::Backend`, see [#1443](https://github.com/paritytech/substrate/issues/1443) /// Returns `Ok(None)` if `target_hash` is not found in search space. - /// TODO [snd] write down time complexity + /// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) pub fn best_containing(&self, target_hash: Block::Hash, maybe_max_number: Option>) -> error::Result> { @@ -1140,7 +1135,6 @@ impl Client where // waiting until we are <= max_number if let Some(max_number) = maybe_max_number { loop { - // TODO [snd] this should be a panic let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))? .ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?; @@ -1160,7 +1154,6 @@ impl Client where return Ok(Some(best_hash)); } - // TODO [snd] this should be a panic let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))? .ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?; @@ -1176,8 +1169,7 @@ impl Client where // header may be on a dead fork -- the only leaves that are considered are // those which can still be finalized. // - // TODO: only issue this warning when not on a dead fork - // part of https://github.com/paritytech/substrate/issues/1558 + // FIXME #1558 only issue this warning when not on a dead fork warn!( "Block {:?} exists in chain but not found when following all \ leaves backwards. 
Number limit = {:?}", diff --git a/substrate/core/client/src/light/blockchain.rs b/substrate/core/client/src/light/blockchain.rs index 94d9da9994..82fff6be6f 100644 --- a/substrate/core/client/src/light/blockchain.rs +++ b/substrate/core/client/src/light/blockchain.rs @@ -141,7 +141,7 @@ impl BlockchainHeaderBackend for Blockchain where Bloc impl BlockchainBackend for Blockchain where Block: BlockT, S: Storage, F: Fetcher { fn body(&self, _id: BlockId) -> ClientResult>> { - // TODO [light]: fetch from remote node + // TODO: #1445 fetch from remote node Ok(None) } diff --git a/substrate/core/consensus/aura/src/lib.rs b/substrate/core/consensus/aura/src/lib.rs index 25bf983de6..38b5ce53a4 100644 --- a/substrate/core/consensus/aura/src/lib.rs +++ b/substrate/core/consensus/aura/src/lib.rs @@ -392,7 +392,7 @@ enum CheckedHeader { /// check a header has been signed by the right key. If the slot is too far in the future, an error will be returned. /// if it's successful, returns the pre-header, the slot number, and the signat. // -// FIXME: needs misbehavior types - https://github.com/paritytech/substrate/issues/1018 +// FIXME #1018 needs misbehavior types fn check_header(slot_now: u64, mut header: B::Header, hash: B::Hash, authorities: &[Ed25519AuthorityId]) -> Result, String> where DigestItemFor: CompatibleDigestItem @@ -537,8 +537,7 @@ impl Verifier for AuraVerifier where ); // we add one to allow for some small drift. 
- // FIXME: in the future, alter this queue to allow deferring of headers - // https://github.com/paritytech/substrate/issues/1019 + // FIXME #1019 in the future, alter this queue to allow deferring of headers let checked_header = check_header::(slot_now + 1, header, hash, &authorities[..])?; match checked_header { CheckedHeader::Checked(pre_header, slot_num, sig) => { @@ -577,7 +576,7 @@ impl Verifier for AuraVerifier where fork_choice: ForkChoiceStrategy::LongestChain, }; - // FIXME: extract authorities - https://github.com/paritytech/substrate/issues/1019 + // FIXME #1019 extract authorities Ok((import_block, None)) } CheckedHeader::Deferred(a, b) => { diff --git a/substrate/core/consensus/common/src/import_queue.rs b/substrate/core/consensus/common/src/import_queue.rs index 3e4f4f6a0b..67dbf63186 100644 --- a/substrate/core/consensus/common/src/import_queue.rs +++ b/substrate/core/consensus/common/src/import_queue.rs @@ -401,7 +401,7 @@ pub fn import_single_block>( } else { debug!(target: "sync", "Header {} was not provided ", block.hash); } - return Err(BlockImportError::IncompleteHeader(peer)) //TODO: use persistent ID + return Err(BlockImportError::IncompleteHeader(peer)) }, }; @@ -441,7 +441,7 @@ pub fn import_single_block>( }, Ok(ImportResult::KnownBad) => { debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); - Err(BlockImportError::BadBlock(peer)) //TODO: use persistent ID + Err(BlockImportError::BadBlock(peer)) }, Err(e) => { debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); diff --git a/substrate/core/consensus/rhd/src/lib.rs b/substrate/core/consensus/rhd/src/lib.rs index 1282432f76..71c34fd79b 100644 --- a/substrate/core/consensus/rhd/src/lib.rs +++ b/substrate/core/consensus/rhd/src/lib.rs @@ -31,7 +31,7 @@ //! set for this block height. 
#![cfg(feature="rhd")] -// FIXME: doesn't compile - https://github.com/paritytech/substrate/issues/1020 +// FIXME #1020 doesn't compile use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -432,7 +432,6 @@ impl Drop for BftFuture OutSink: Sink, SinkError=Error>, { fn drop(&mut self) { - // TODO: have a trait member to pass misbehavior reports into. let misbehavior = self.inner.drain_misbehavior().collect::>(); self.inner.context().proposer.import_misbehavior(misbehavior); } @@ -466,7 +465,7 @@ pub struct BftService { live_agreement: Mutex>, round_cache: Arc>>, round_timeout_multiplier: u64, - key: Arc, // TODO: key changing over time. + key: Arc, factory: P, } @@ -488,14 +487,13 @@ impl BftService start_round: 0, })), round_timeout_multiplier: 10, - key: key, // TODO: key changing over time. + key: key, factory, } } /// Get the local Authority ID. pub fn local_id(&self) -> AuthorityId { - // TODO: based on a header and some keystore. self.key.public().into() } @@ -1084,7 +1082,6 @@ impl BaseProposer<::Block> for Proposer where self.transaction_pool.ready(|pending_iterator| { let mut pending_size = 0; for pending in pending_iterator { - // TODO [ToDr] Probably get rid of it, and validate in runtime. let encoded_size = pending.data.encode().len(); if pending_size + encoded_size >= MAX_TRANSACTIONS_SIZE { break } diff --git a/substrate/core/executor/src/native_executor.rs b/substrate/core/executor/src/native_executor.rs index d6de933c7f..753c2227f6 100644 --- a/substrate/core/executor/src/native_executor.rs +++ b/substrate/core/executor/src/native_executor.rs @@ -268,7 +268,6 @@ macro_rules! 
native_executor_instance { native_executor_instance!(IMPL $name, $dispatcher, $version, $code); }; (IMPL $name:ident, $dispatcher:path, $version:path, $code:expr) => { - // TODO: this is not so great – I think I should go back to have dispatch take a type param and modify this macro to accept a type param and then pass it in from the test-client instead use primitives::Blake2Hasher as _Blake2Hasher; impl $crate::NativeExecutionDispatch for $name { fn native_equivalent() -> &'static [u8] { diff --git a/substrate/core/executor/src/wasm_executor.rs b/substrate/core/executor/src/wasm_executor.rs index a08f828d05..f062603aef 100644 --- a/substrate/core/executor/src/wasm_executor.rs +++ b/substrate/core/executor/src/wasm_executor.rs @@ -118,7 +118,6 @@ impl ReadPrimitive for MemoryInstance { } } -// TODO: this macro does not support `where` clauses and that seems somewhat tricky to add impl_function_executor!(this: FunctionExecutor<'e, E>, ext_print_utf8(utf8_data: *const u8, utf8_len: u32) => { if let Ok(utf8) = this.memory.get(utf8_data, utf8_len as usize) { diff --git a/substrate/core/finality-grandpa/src/lib.rs b/substrate/core/finality-grandpa/src/lib.rs index d84cf00728..fd2828b9bb 100644 --- a/substrate/core/finality-grandpa/src/lib.rs +++ b/substrate/core/finality-grandpa/src/lib.rs @@ -577,7 +577,6 @@ impl, N, RA> voter::Environment( round, self.set_id, @@ -857,7 +856,7 @@ fn finalize_block, E, RA>( // lock must be held through writing to DB to avoid race let mut authority_set = authority_set.inner().write(); - // TODO [andre]: clone only when changed (#1483) + // FIXME #1483: clone only when changed let old_authority_set = authority_set.clone(); // needed in case there is an authority set change, used for reverting in // case of error diff --git a/substrate/core/keystore/src/lib.rs b/substrate/core/keystore/src/lib.rs index 56e1ae62e8..63da247970 100644 --- a/substrate/core/keystore/src/lib.rs +++ b/substrate/core/keystore/src/lib.rs @@ -58,7 +58,7 @@ pub 
struct InvalidPassword; struct EncryptedKey { mac: [u8; 32], salt: [u8; 32], - ciphertext: Vec, // TODO: switch to fixed-size when serde supports + ciphertext: Vec, // FIXME: switch to fixed-size when serde supports iv: [u8; 16], iterations: u32, } diff --git a/substrate/core/network/src/consensus_gossip.rs b/substrate/core/network/src/consensus_gossip.rs index 6fa7f941ee..186919e509 100644 --- a/substrate/core/network/src/consensus_gossip.rs +++ b/substrate/core/network/src/consensus_gossip.rs @@ -76,7 +76,6 @@ impl ConsensusGossip { if roles.intersects(Roles::AUTHORITY) { trace!(target:"gossip", "Registering {:?} {}", roles, who); // Send out all known messages to authorities. - // TODO: limit by size let mut known_messages = HashSet::new(); for entry in self.messages.iter() { known_messages.insert((entry.topic, entry.message_hash)); diff --git a/substrate/core/network/src/on_demand.rs b/substrate/core/network/src/on_demand.rs index d71352e86e..a9b00dc120 100644 --- a/substrate/core/network/src/on_demand.rs +++ b/substrate/core/network/src/on_demand.rs @@ -213,7 +213,7 @@ impl OnDemandService for OnDemand where B::Header: HeaderT, { fn on_connect(&self, peer: NodeIndex, role: Roles, best_number: NumberFor) { - if !role.intersects(Roles::FULL | Roles::AUTHORITY) { // TODO: correct? + if !role.intersects(Roles::FULL | Roles::AUTHORITY) { return; } diff --git a/substrate/core/network/src/protocol.rs b/substrate/core/network/src/protocol.rs index 49343cf208..2506dcd546 100644 --- a/substrate/core/network/src/protocol.rs +++ b/substrate/core/network/src/protocol.rs @@ -458,7 +458,6 @@ impl, H: ExHashT> Protocol { message::FromBlock::Number(n) => BlockId::Number(n), }; let max = cmp::min(request.max.unwrap_or(u32::max_value()), MAX_BLOCK_DATA_RESPONSE) as usize; - // TODO: receipts, etc. 
let get_header = request.fields.contains(message::BlockAttributes::HEADER); let get_body = request.fields.contains(message::BlockAttributes::BODY); let get_justification = request.fields.contains(message::BlockAttributes::JUSTIFICATION); @@ -498,7 +497,6 @@ impl, H: ExHashT> Protocol { } fn on_block_response(&self, io: &mut SyncIo, peer: NodeIndex, request: message::BlockRequest, response: message::BlockResponse) { - // TODO: validate response let blocks_range = match ( response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), diff --git a/substrate/core/primitives/src/changes_trie.rs b/substrate/core/primitives/src/changes_trie.rs index 0211b57236..924e5928db 100644 --- a/substrate/core/primitives/src/changes_trie.rs +++ b/substrate/core/primitives/src/changes_trie.rs @@ -49,7 +49,7 @@ impl ChangesTrieConfiguration { return 1; } - // TODO: use saturating_pow when available + // FIXME: use saturating_pow once stabilized - https://github.com/rust-lang/rust/issues/48320 let mut max_digest_interval = self.digest_interval; for _ in 1..self.digest_levels { max_digest_interval = match max_digest_interval.checked_mul(self.digest_interval) { diff --git a/substrate/core/primitives/src/ed25519.rs b/substrate/core/primitives/src/ed25519.rs index 05077fbd37..ba43be841b 100644 --- a/substrate/core/primitives/src/ed25519.rs +++ b/substrate/core/primitives/src/ed25519.rs @@ -238,16 +238,6 @@ impl Pair { r.copy_from_slice(pk); Public(r) } - - /// Derive a child key. Probably unsafe and broken. - // TODO: proper HD derivation https://cardanolaunch.com/assets/Ed25519_BIP.pdf - pub fn derive_child_probably_bad(&self, chain_data: &[u8]) -> Pair { - let sig = self.sign(chain_data); - let mut seed = [0u8; 32]; - seed.copy_from_slice(&sig[..32]); - - Pair::from_seed(&seed) - } } /// Verify a signature on a message. Returns true if the signature is good. 
@@ -350,12 +340,6 @@ mod test { assert_eq!(pair1.public(), pair2.public()); } - #[test] - fn derive_child() { - let pair = Pair::generate(); - let _pair2 = pair.derive_child_probably_bad(b"session_1234"); - } - #[test] fn ss58check_roundtrip_works() { let pair = Pair::from_seed(b"12345678901234567890123456789012"); diff --git a/substrate/core/rpc/src/chain/number.rs b/substrate/core/rpc/src/chain/number.rs index 7a300313e3..4d8659fcf3 100644 --- a/substrate/core/rpc/src/chain/number.rs +++ b/substrate/core/rpc/src/chain/number.rs @@ -41,7 +41,8 @@ impl> NumberOrHex { match self { NumberOrHex::Number(n) => Ok(n), NumberOrHex::Hex(h) => { - // TODO [ToDr] this only supports `u64` since `BlockNumber` is `As` we could possibly go with `u128`. (#1377) + // FIXME #1377 this only supports `u64` since `BlockNumber` + // is `As` we could possibly go with `u128`. let l = h.low_u64(); if U256::from(l) != h { Err(format!("`{}` does not fit into the block number type.", h)) diff --git a/substrate/core/service/src/chain_ops.rs b/substrate/core/service/src/chain_ops.rs index 28bff44514..9813301a64 100644 --- a/substrate/core/service/src/chain_ops.rs +++ b/substrate/core/service/src/chain_ops.rs @@ -106,7 +106,7 @@ pub fn import_blocks( impl Link for DummyLink { } let client = new_client::(&config)?; - // FIXME: this shouldn't need a mutable config. https://github.com/paritytech/substrate/issues/1134 + // FIXME #1134 this shouldn't need a mutable config. 
let queue = components::FullComponents::::build_import_queue(&mut config, client.clone())?; queue.start(DummyLink)?; diff --git a/substrate/core/service/src/components.rs b/substrate/core/service/src/components.rs index 51dc086689..c966911996 100644 --- a/substrate/core/service/src/components.rs +++ b/substrate/core/service/src/components.rs @@ -217,7 +217,6 @@ impl MaintainTransactionPool for C where ComponentClient: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: TaggedTransactionQueue>, { - // TODO [ToDr] Optimize and re-use tags from the pool. fn on_block_imported( id: &BlockId>, client: &ComponentClient, @@ -329,7 +328,7 @@ pub trait Components: Sized + 'static { type RuntimeApi: Send + Sync; /// A type that can start the RPC. type RPC: StartRPC; - // TODO [ToDr] Traitify transaction pool and allow people to implement their own. (#1242) + // TODO: Traitify transaction pool and allow people to implement their own. (#1242) /// A type that can maintain transaction pool. type TransactionPool: MaintainTransactionPool; /// Extrinsic pool type. diff --git a/substrate/core/service/src/lib.rs b/substrate/core/service/src/lib.rs index 170f38fd4e..abffc2b74c 100644 --- a/substrate/core/service/src/lib.rs +++ b/substrate/core/service/src/lib.rs @@ -137,7 +137,7 @@ impl Service { let mut keystore = Keystore::open(config.keystore_path.as_str().into())?; // This is meant to be for testing only - // FIXME: remove this - https://github.com/paritytech/substrate/issues/1063 + // FIXME #1063 remove this for seed in &config.keys { keystore.generate_from_seed(seed)?; } @@ -274,7 +274,6 @@ impl Service { // extrinsic notifications let network = Arc::downgrade(&network); let events = transaction_pool.import_notification_stream() - // TODO [ToDr] Consider throttling? 
.for_each(move |_| { if let Some(network) = network.upgrade() { network.trigger_repropagate(); diff --git a/substrate/core/sr-io/with_std.rs b/substrate/core/sr-io/with_std.rs index c3ba363d28..1e39f44252 100644 --- a/substrate/core/sr-io/with_std.rs +++ b/substrate/core/sr-io/with_std.rs @@ -41,7 +41,6 @@ use primitives::hexdisplay::HexDisplay; use primitives::H256; use hash_db::Hasher; -// TODO: use the real error, not NoError. environmental!(ext: trait Externalities); diff --git a/substrate/core/sr-io/without_std.rs b/substrate/core/sr-io/without_std.rs index f0ae5e7020..4c532089e3 100644 --- a/substrate/core/sr-io/without_std.rs +++ b/substrate/core/sr-io/without_std.rs @@ -315,8 +315,6 @@ pub fn trie_root< B: AsRef<[u8]>, >(_input: I) -> [u8; 32] { unimplemented!() - // TODO Maybe implement (though probably easier/cleaner to have blake2 be the only thing - // implemneted natively and compile the trie logic as wasm). } /// A trie root formed from the enumerated items. @@ -326,8 +324,6 @@ pub fn ordered_trie_root< A: AsRef<[u8]> >(_input: I) -> [u8; 32] { unimplemented!() - // TODO Maybe implement (though probably easier/cleaner to have blake2 be the only thing - // implemneted natively and compile the trie logic as wasm). } /// The current relay chain identifier. diff --git a/substrate/core/sr-primitives/src/generic/unchecked_extrinsic.rs b/substrate/core/sr-primitives/src/generic/unchecked_extrinsic.rs index 0cde5f42a3..2bcb2f5fb2 100644 --- a/substrate/core/sr-primitives/src/generic/unchecked_extrinsic.rs +++ b/substrate/core/sr-primitives/src/generic/unchecked_extrinsic.rs @@ -156,7 +156,6 @@ impl } } -/// TODO: use derive when possible. 
#[cfg(feature = "std")] impl fmt::Debug for UncheckedExtrinsic diff --git a/substrate/core/sr-primitives/src/generic/unchecked_mortal_compact_extrinsic.rs b/substrate/core/sr-primitives/src/generic/unchecked_mortal_compact_extrinsic.rs index 9ffac3d3f9..953d200113 100644 --- a/substrate/core/sr-primitives/src/generic/unchecked_mortal_compact_extrinsic.rs +++ b/substrate/core/sr-primitives/src/generic/unchecked_mortal_compact_extrinsic.rs @@ -168,7 +168,6 @@ impl serde::Serialize } } -/// TODO: use derive when possible. #[cfg(feature = "std")] impl fmt::Debug for UncheckedMortalCompactExtrinsic where Address: fmt::Debug, diff --git a/substrate/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs b/substrate/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs index e15a37c3bf..8aa6faa467 100644 --- a/substrate/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs +++ b/substrate/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs @@ -166,7 +166,6 @@ impl serde::Ser } } -/// TODO: use derive when possible. #[cfg(feature = "std")] impl fmt::Debug for UncheckedMortalExtrinsic where Address: fmt::Debug, diff --git a/substrate/core/sr-primitives/src/lib.rs b/substrate/core/sr-primitives/src/lib.rs index d0eb6efbfd..10ba97a6cc 100644 --- a/substrate/core/sr-primitives/src/lib.rs +++ b/substrate/core/sr-primitives/src/lib.rs @@ -579,7 +579,6 @@ mod tests { pub enum RawLog { B1(AuthorityId), B2(AuthorityId) } } - // TODO try to avoid redundant brackets: a(AuthoritiesChange), b impl_outer_log! 
{ pub enum Log(InternalLog: DigestItem) for Runtime { a(AuthoritiesChange), b() diff --git a/substrate/core/state-db/src/noncanonical.rs b/substrate/core/state-db/src/noncanonical.rs index b1d34c09ad..e689152da0 100644 --- a/substrate/core/state-db/src/noncanonical.rs +++ b/substrate/core/state-db/src/noncanonical.rs @@ -220,7 +220,7 @@ impl NonCanonicalOverlay { commit.data.inserted = self.last_canonicalized_overlay.iter().map(|(k, v)| (k.clone(), v.clone())).collect(); commit.data.deleted = overlay.deleted; } else { - // TODO: borrow checker won't allow us to split out mutable references + // borrow checker won't allow us to split out mutable references // required for recursive processing. A more efficient implementation // that does not require converting to vector is possible let mut vec: Vec<_> = self.levels.drain(..).collect(); diff --git a/substrate/core/state-machine/src/backend.rs b/substrate/core/state-machine/src/backend.rs index a032ab1195..9467f340d1 100644 --- a/substrate/core/state-machine/src/backend.rs +++ b/substrate/core/state-machine/src/backend.rs @@ -118,7 +118,7 @@ impl Consolidate for MemoryDB { } /// Error impossible. -// TODO: use `!` type when stabilized. +// FIXME: use `!` type when stabilized. 
https://github.com/rust-lang/rust/issues/35121 #[derive(Debug)] pub enum Void {} @@ -291,7 +291,7 @@ impl Backend for InMemory where H::Out: HeapSizeOf { } fn try_into_trie_backend(self) -> Option> { - let mut mdb = MemoryDB::default(); // TODO: should be more correct and use ::new() + let mut mdb = MemoryDB::default(); let mut root = None; for (storage_key, map) in self.inner { if storage_key != None { diff --git a/substrate/core/state-machine/src/changes_trie/changes_iterator.rs b/substrate/core/state-machine/src/changes_trie/changes_iterator.rs index af844db2c8..11bb63c58a 100644 --- a/substrate/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/substrate/core/state-machine/src/changes_trie/changes_iterator.rs @@ -104,7 +104,7 @@ pub fn key_changes_proof_check, H: Hasher>( max: u64, key: &[u8] ) -> Result, String> where H::Out: HeapSizeOf { - let mut proof_db = MemoryDB::::default(); // TODO: use new for correctness + let mut proof_db = MemoryDB::::default(); for item in proof { proof_db.insert(&item); } diff --git a/substrate/core/state-machine/src/changes_trie/prune.rs b/substrate/core/state-machine/src/changes_trie/prune.rs index 76c746cb72..00336ab15c 100644 --- a/substrate/core/state-machine/src/changes_trie/prune.rs +++ b/substrate/core/state-machine/src/changes_trie/prune.rs @@ -62,7 +62,7 @@ pub fn prune, H: Hasher, F: FnMut(H::Out)>( }; // delete changes trie for every block in range - // TODO: limit `max_digest_interval` so that this cycle won't involve huge ranges + // FIXME: limit `max_digest_interval` so that this cycle won't involve huge ranges for block in first..last+1 { let root = match storage.root(current_block, block) { Ok(Some(root)) => root, diff --git a/substrate/core/state-machine/src/ext.rs b/substrate/core/state-machine/src/ext.rs index 6274eef720..54d3511f42 100644 --- a/substrate/core/state-machine/src/ext.rs +++ b/substrate/core/state-machine/src/ext.rs @@ -302,7 +302,7 @@ where ); let root_and_tx = root_and_tx.map(|(root, 
changes)| { let mut calculated_root = Default::default(); - let mut mdb = MemoryDB::default(); // TODO: use new for correctness + let mut mdb = MemoryDB::default(); { let mut trie = TrieDBMut::::new(&mut mdb, &mut calculated_root); for (key, value) in changes { diff --git a/substrate/core/state-machine/src/proving_backend.rs b/substrate/core/state-machine/src/proving_backend.rs index 2e9d2e5ff2..bb15ea74cd 100644 --- a/substrate/core/state-machine/src/proving_backend.rs +++ b/substrate/core/state-machine/src/proving_backend.rs @@ -195,7 +195,7 @@ where H: Hasher, H::Out: HeapSizeOf, { - let mut db = MemoryDB::default(); // TODO: use new for correctness + let mut db = MemoryDB::default(); for item in proof { db.insert(&item); } diff --git a/substrate/core/state-machine/src/trie_backend.rs b/substrate/core/state-machine/src/trie_backend.rs index 543f8a2e04..a3947ab3db 100644 --- a/substrate/core/state-machine/src/trie_backend.rs +++ b/substrate/core/state-machine/src/trie_backend.rs @@ -82,7 +82,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn pairs(&self) -> Vec<(Vec, Vec)> { - let mut read_overlay = MemoryDB::default(); // TODO: use new for correctness + let mut read_overlay = MemoryDB::default(); let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); let collect_all = || -> Result<_, Box>> { @@ -106,7 +106,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn keys(&self, prefix: &Vec) -> Vec> { - let mut read_overlay = MemoryDB::default(); // TODO: use new for correctness + let mut read_overlay = MemoryDB::default(); let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); let collect_all = || -> Result<_, Box>> { @@ -193,7 +193,7 @@ pub mod tests { fn test_db() -> (MemoryDB, H256) { let mut root = H256::default(); - let mut mdb = MemoryDB::::default(); // TODO: use new() to be more correct + let mut mdb = MemoryDB::::default(); { let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"key", 
b"value").expect("insert failed"); @@ -230,7 +230,7 @@ pub mod tests { #[test] fn pairs_are_empty_on_empty_storage() { assert!(TrieBackend::, Blake2Hasher>::new( - MemoryDB::default(), // TODO: use new() to be more correct + MemoryDB::default(), Default::default(), ).pairs().is_empty()); } diff --git a/substrate/core/state-machine/src/trie_backend_essence.rs b/substrate/core/state-machine/src/trie_backend_essence.rs index cd5c046941..9163f5ded0 100644 --- a/substrate/core/state-machine/src/trie_backend_essence.rs +++ b/substrate/core/state-machine/src/trie_backend_essence.rs @@ -176,7 +176,7 @@ impl<'a, where H::Out: HeapSizeOf { fn keys(&self) -> HashMap { - self.overlay.keys() // TODO: iterate backing + self.overlay.keys() } fn get(&self, key: &H::Out) -> Option { diff --git a/substrate/core/test-client/src/lib.rs b/substrate/core/test-client/src/lib.rs index c2cd79f3a1..1e759fb9bc 100644 --- a/substrate/core/test-client/src/lib.rs +++ b/substrate/core/test-client/src/lib.rs @@ -49,7 +49,7 @@ use keyring::Keyring; mod local_executor { #![allow(missing_docs)] use super::runtime; - // TODO: change the macro and pass in the `BlakeHasher` that dispatch needs from here instead + // FIXME #1576 change the macro and pass in the `BlakeHasher` that dispatch needs from here instead native_executor_instance!( pub LocalExecutor, runtime::api::dispatch, diff --git a/substrate/core/transaction-pool/graph/src/lib.rs b/substrate/core/transaction-pool/graph/src/lib.rs index a11be3690b..d30032830c 100644 --- a/substrate/core/transaction-pool/graph/src/lib.rs +++ b/substrate/core/transaction-pool/graph/src/lib.rs @@ -20,9 +20,6 @@ //! and their priority. //! The pool is able to return an iterator that traverses transaction //! graph in the correct order taking into account priorities and dependencies. -//! -//! TODO [ToDr] -//! 
- [ ] Multi-threading (getting ready transactions should not block the pool) #![warn(missing_docs)] #![warn(unused_extern_crates)] diff --git a/substrate/core/transaction-pool/graph/src/pool.rs b/substrate/core/transaction-pool/graph/src/pool.rs index c9ee7c1e1f..eb0ff4876f 100644 --- a/substrate/core/transaction-pool/graph/src/pool.rs +++ b/substrate/core/transaction-pool/graph/src/pool.rs @@ -307,7 +307,6 @@ impl Pool { impl Pool { /// Create a new transaction pool. - /// TODO [ToDr] Options pub fn new(_options: Options, api: B) -> Self { Pool { api, diff --git a/substrate/core/transaction-pool/src/api.rs b/substrate/core/transaction-pool/src/api.rs index d8bfe66924..0d6c60a7cb 100644 --- a/substrate/core/transaction-pool/src/api.rs +++ b/substrate/core/transaction-pool/src/api.rs @@ -67,7 +67,6 @@ impl txpool::ChainApi for ChainApi where Ok(self.client.runtime_api().validate_transaction(at, uxt)?) } - // TODO [toDr] Use proper lbock number type fn block_id_to_number(&self, at: &BlockId) -> error::Result>> { Ok(self.client.block_number_from_id(at)?) } diff --git a/substrate/core/trie/src/lib.rs b/substrate/core/trie/src/lib.rs index f2f1183d4c..5b02e915cb 100644 --- a/substrate/core/trie/src/lib.rs +++ b/substrate/core/trie/src/lib.rs @@ -16,7 +16,7 @@ //! Utility functions to interact with Substrate's Base-16 Modified Merkle Patricia tree ("trie"). 
-// TODO: no_std +// FIXME: no_std - https://github.com/paritytech/substrate/issues/1574 mod error; mod node_header; @@ -71,7 +71,7 @@ pub fn delta_trie_root(db: &mut HashDB, mut root: H::Out, for (key, change) in delta { match change { Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, // TODO: archive mode + None => trie.remove(key.as_ref())?, }; } } @@ -151,7 +151,7 @@ pub fn child_delta_trie_root(_storage_key: &[u8], db: &mut H for (key, change) in delta { match change { Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, // TODO: archive mode + None => trie.remove(key.as_ref())?, }; } } diff --git a/substrate/core/trie/src/node_codec.rs b/substrate/core/trie/src/node_codec.rs index bb8e146b16..438b406009 100644 --- a/substrate/core/trie/src/node_codec.rs +++ b/substrate/core/trie/src/node_codec.rs @@ -95,14 +95,14 @@ impl trie_db::NodeCodec for NodeCodec { vec![EMPTY_TRIE] } - // TODO: refactor this so that `partial` isn't already encoded with HPE. Should just be an `impl Iterator`. + // FIXME: refactor this so that `partial` isn't already encoded with HPE. Should just be an `impl Iterator`. fn leaf_node(partial: &[u8], value: &[u8]) -> Vec { let mut output = partial_to_key(partial, LEAF_NODE_OFFSET, LEAF_NODE_BIG); value.encode_to(&mut output); output } - // TODO: refactor this so that `partial` isn't already encoded with HPE. Should just be an `impl Iterator`. + // FIXME: refactor this so that `partial` isn't already encoded with HPE. Should just be an `impl Iterator`. 
fn ext_node(partial: &[u8], child: ChildReference) -> Vec { let mut output = partial_to_key(partial, EXTENSION_NODE_OFFSET, EXTENSION_NODE_BIG); match child { diff --git a/substrate/core/trie/src/trie_stream.rs b/substrate/core/trie/src/trie_stream.rs index 3c8291389c..5f1bf1c9d9 100644 --- a/substrate/core/trie/src/trie_stream.rs +++ b/substrate/core/trie/src/trie_stream.rs @@ -67,34 +67,25 @@ impl trie_root::TrieStream for TrieStream { fn append_leaf(&mut self, key: &[u8], value: &[u8]) { self.buffer.extend(fuse_nibbles_node(key, true)); - // OPTIMISATION: I'd like to do `hpe.encode_to(&mut self.buffer);` here; need an `impl<'a> Encode for impl Iterator + 'a`? value.encode_to(&mut self.buffer); } fn begin_branch(&mut self, maybe_value: Option<&[u8]>, has_children: impl Iterator) { -// println!("[begin_branch] pushing BRANCH_NODE"); self.buffer.extend(&branch_node(maybe_value.is_some(), has_children)); // Push the value if one exists. if let Some(value) = maybe_value { value.encode_to(&mut self.buffer); } -// println!("[begin_branch] buffer so far: {:#x?}", self.buffer); } fn append_extension(&mut self, key: &[u8]) { self.buffer.extend(fuse_nibbles_node(key, false)); } fn append_substream(&mut self, other: Self) { let data = other.out(); -// println!("[append_substream] START own buffer: {:x?}", self.buffer); -// println!("[append_substream] START other buffer: {:x?}", data); match data.len() { 0...31 => { -// println!("[append_substream] appending data, because data.len() = {}", data.len()); data.encode_to(&mut self.buffer) }, _ => { -// println!("[append_substream] would have hashed, because data.len() = {}", data.len()); -// data.encode_to(&mut self.buffer) - // TODO: re-enable hashing before merging H::hash(&data).as_ref().encode_to(&mut self.buffer) } } diff --git a/substrate/node/cli/src/chain_spec.rs b/substrate/node/cli/src/chain_spec.rs index ccdb82b868..6429d4923d 100644 --- a/substrate/node/cli/src/chain_spec.rs +++ 
b/substrate/node/cli/src/chain_spec.rs @@ -58,7 +58,7 @@ fn staging_testnet_config_genesis() -> GenesisConfig { GenesisConfig { consensus: Some(ConsensusConfig { - code: include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm").to_vec(), // TODO change + code: include_bytes!("../../runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm").to_vec(), // FIXME change once we have #1252 authorities: initial_authorities.clone(), }), system: None, diff --git a/substrate/node/cli/src/service.rs b/substrate/node/cli/src/service.rs index 2e5aa74390..a773e3cf95 100644 --- a/substrate/node/cli/src/service.rs +++ b/substrate/node/cli/src/service.rs @@ -46,8 +46,7 @@ construct_simple_protocol! { /// Node specific configuration pub struct NodeConfig { /// grandpa connection to import block - // FIXME: rather than putting this on the config, let's have an actual intermediate setup state - // https://github.com/paritytech/substrate/issues/1134 + // FIXME #1134 rather than putting this on the config, let's have an actual intermediate setup state pub grandpa_import_setup: Option<(Arc>, grandpa::LinkHalfForService)>, inherent_data_providers: InherentDataProviders, } @@ -106,7 +105,7 @@ construct_service_factory! { executor.spawn(grandpa::run_grandpa( grandpa::Config { local_key, - // FIXME: make this available through chainspec? + // FIXME #1578 make this available through chainspec gossip_duration: Duration::new(4, 0), justification_period: 4096, name: Some(service.config.name.clone()) diff --git a/substrate/srml/consensus/src/lib.rs b/substrate/srml/consensus/src/lib.rs index 54da8a5de0..bd4b564985 100644 --- a/substrate/srml/consensus/src/lib.rs +++ b/substrate/srml/consensus/src/lib.rs @@ -201,7 +201,6 @@ decl_module! { /// Report some misbehaviour. fn report_misbehavior(origin, _report: Vec) { ensure_signed(origin)?; - // TODO: requires extension trait. 
} /// Note the previous block's validator missed their opportunity to propose a block. diff --git a/substrate/srml/contract/src/lib.rs b/substrate/srml/contract/src/lib.rs index f391f8ce5f..2f278b2a2c 100644 --- a/substrate/srml/contract/src/lib.rs +++ b/substrate/srml/contract/src/lib.rs @@ -347,12 +347,8 @@ decl_storage! { } } -// TODO: consider storing upper-bound for contract's gas limit in fixed-length runtime -// code in contract itself and use that. - /// The storage items associated with an account/key. /// -/// TODO: keys should also be able to take AsRef to ensure Vecs can be passed as &[u8] pub(crate) struct StorageOf(rstd::marker::PhantomData); impl StorageDoubleMap for StorageOf { const PREFIX: &'static [u8] = b"con:sto:"; diff --git a/substrate/srml/contract/src/wasm/runtime.rs b/substrate/srml/contract/src/wasm/runtime.rs index f18e5162b3..03f56cbc3d 100644 --- a/substrate/srml/contract/src/wasm/runtime.rs +++ b/substrate/srml/contract/src/wasm/runtime.rs @@ -196,8 +196,6 @@ fn write_sandbox_memory( // * AFTER MAKING A CHANGE MAKE SURE TO UPDATE COMPLEXITY.MD * // *********************************************************** -// TODO: ext_balance, ext_address, ext_callvalue, etc. - // Define a function `fn init_env() -> HostFunctionSet` that returns // a function set which can be imported by an executed contract. define_env!(Env, , diff --git a/substrate/srml/grandpa/src/lib.rs b/substrate/srml/grandpa/src/lib.rs index 1a0d0f7c31..54e78e9474 100644 --- a/substrate/srml/grandpa/src/lib.rs +++ b/substrate/srml/grandpa/src/lib.rs @@ -181,7 +181,7 @@ decl_module! { /// Report some misbehaviour. 
fn report_misbehavior(origin, _report: Vec) { ensure_signed(origin)?; - // TODO: https://github.com/paritytech/substrate/issues/1112 + // FIXME: https://github.com/paritytech/substrate/issues/1112 } fn on_finalise(block_number: T::BlockNumber) { @@ -259,7 +259,7 @@ impl Module where Ed25519AuthorityId: core::convert::From<(::rstd::marker::PhantomData); -// TODO: remove when https://github.com/rust-lang/rust/issues/26925 is fixed +// FIXME: remove when https://github.com/rust-lang/rust/issues/26925 is fixed impl Default for SyncedAuthorities { fn default() -> Self { SyncedAuthorities(::rstd::marker::PhantomData) diff --git a/substrate/srml/session/src/lib.rs b/substrate/srml/session/src/lib.rs index b758325cea..032b3328a3 100644 --- a/substrate/srml/session/src/lib.rs +++ b/substrate/srml/session/src/lib.rs @@ -138,7 +138,7 @@ decl_storage! { impl Module { /// The number of validators currently. pub fn validator_count() -> u32 { - >::get().len() as u32 // TODO: can probably optimised + >::get().len() as u32 } /// The last length change, if there was one, zero if not. @@ -158,7 +158,7 @@ impl Module { /// Called by `staking::new_era()` only. `next_session` should be called after this in order to /// update the session keys to the next validator set. pub fn set_validators(new: &[T::AccountId]) { - >::put(&new.to_vec()); // TODO: optimise. 
+ >::put(&new.to_vec()); >::set_authorities( &new.iter().cloned().map(T::ConvertAccountIdToSessionKey::convert).collect::>() ); diff --git a/substrate/srml/staking/src/lib.rs b/substrate/srml/staking/src/lib.rs index f35ae3d436..15f0beadf7 100644 --- a/substrate/srml/staking/src/lib.rs +++ b/substrate/srml/staking/src/lib.rs @@ -355,7 +355,7 @@ impl Module { let noms = Self::current_nominators_for(v); let total = noms.iter().map(>::total_balance).fold(T::Balance::zero(), |acc, x| acc + x); if !total.is_zero() { - let safe_mul_rational = |b| b * rem / total;// TODO: avoid overflow + let safe_mul_rational = |b| b * rem / total;// FIXME #1572 avoid overflow for n in noms.iter() { let _ = >::slash(n, safe_mul_rational(>::total_balance(n))); // best effort - not much that can be done on fail. } @@ -376,7 +376,7 @@ impl Module { .map(>::total_balance) .fold(>::total_balance(who), |acc, x| acc + x) .max(One::one()); - let safe_mul_rational = |b| b * reward / total;// TODO: avoid overflow + let safe_mul_rational = |b| b * reward / total;// FIXME #1572: avoid overflow for n in noms.iter() { let _ = >::reward(n, safe_mul_rational(>::total_balance(n))); } @@ -454,7 +454,7 @@ impl Module { // combination of validators, then use session::internal::set_validators(). // for now, this just orders would-be stakers by their balances and chooses the top-most // >::get() of them. - // TODO: this is not sound. this should be moved to an off-chain solution mechanism. + // FIXME #1571 this is not sound. this should be moved to an off-chain solution mechanism. 
let mut intentions = Self::intentions() .into_iter() .map(|v| (Self::slashable_balance(&v), v)) diff --git a/substrate/srml/staking/src/tests.rs b/substrate/srml/staking/src/tests.rs index 01a03d3124..e6496159a1 100644 --- a/substrate/srml/staking/src/tests.rs +++ b/substrate/srml/staking/src/tests.rs @@ -416,7 +416,6 @@ fn nominating_slashes_should_work() { assert_eq!(Balances::total_balance(&2), 20); //not slashed assert_eq!(Balances::total_balance(&3), 10); //slashed assert_eq!(Balances::total_balance(&4), 30); //slashed - // TODO: change slash % to something sensible. }); } diff --git a/substrate/srml/support/procedural/src/storage/mod.rs b/substrate/srml/support/procedural/src/storage/mod.rs index 4e25992593..b3cf50ece0 100644 --- a/substrate/srml/support/procedural/src/storage/mod.rs +++ b/substrate/srml/support/procedural/src/storage/mod.rs @@ -74,7 +74,7 @@ struct AddExtraGenesisLine { pub extra_field: ext::Parens, pub coldot_token: Token![:], pub extra_type: syn::Type, - // TODO use a custom ext::Option instead (syn option on '=' fails) + // FIXME #1570: use a custom ext::Option instead (syn option on '=' fails) pub default_value: ext::Seq, } @@ -91,7 +91,7 @@ struct DeclStorageLine { pub build: Option, pub coldot_token: Token![:], pub storage_type: DeclStorageType, - // TODO use a custom ext::Option instead (syn option on '=' fails) + // FIXME #1570: use a custom ext::Option instead (syn option on '=' fails) pub default_value: ext::Seq, } diff --git a/substrate/srml/support/procedural/tools/src/lib.rs b/substrate/srml/support/procedural/tools/src/lib.rs index 1dae39fce4..a768f5ea0b 100644 --- a/substrate/srml/support/procedural/tools/src/lib.rs +++ b/substrate/srml/support/procedural/tools/src/lib.rs @@ -57,9 +57,7 @@ macro_rules! 
custom_keyword { } -// TODO following functions are copied from sr-api-macros : do a merge to get a unique procedural -// macro tooling crate (this crate path does not look good for it) - +// FIXME #1569, remove the following functions, which are copied from sr-api-macros use proc_macro2::{TokenStream, Span}; use syn::Ident; diff --git a/substrate/srml/support/src/dispatch.rs b/substrate/srml/support/src/dispatch.rs index 655f35bb90..d0592711b7 100644 --- a/substrate/srml/support/src/dispatch.rs +++ b/substrate/srml/support/src/dispatch.rs @@ -485,7 +485,7 @@ macro_rules! decl_module { // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Debug))] - // TODO: switching based on std feature is because of an issue in + // FIXME: switching based on std feature is because of an issue in // serde-derive for when we attempt to derive `Deserialize` on these types, // in a situation where we've imported `srml_support` as another name. #[cfg(feature = "std")] diff --git a/substrate/srml/support/src/storage/generator.rs b/substrate/srml/support/src/storage/generator.rs index 7cf5e28ded..79469d761f 100644 --- a/substrate/srml/support/src/storage/generator.rs +++ b/substrate/srml/support/src/storage/generator.rs @@ -194,7 +194,7 @@ pub trait StorageMap { fn mutate R, S: Storage>(key: &K, f: F, storage: &S) -> R; } -// TODO: Remove this in favour of `decl_storage` macro. +// FIXME #1466 Remove this in favour of `decl_storage` macro. /// Declares strongly-typed wrappers around codec-compatible types in storage. #[macro_export] macro_rules! storage_items { @@ -445,7 +445,6 @@ macro_rules! __storage_items_internal { } /// Get the key used to put the length field. - // TODO: concat macro should accept byte literals. fn len_key() -> $crate::rstd::vec::Vec { let mut key = $prefix.to_vec(); key.extend(b"len"); @@ -513,7 +512,7 @@ macro_rules! 
__handle_wrap_internal { }; } -// TODO: revisit this idiom once we get `type`s in `impl`s. +// FIXME: revisit this idiom once we get `type`s in `impl`s. /*impl Module { type Now = super::Now; }*/ diff --git a/substrate/srml/support/src/storage/mod.rs b/substrate/srml/support/src/storage/mod.rs index 6e1302718a..c788c7c402 100644 --- a/substrate/srml/support/src/storage/mod.rs +++ b/substrate/srml/support/src/storage/mod.rs @@ -24,8 +24,6 @@ use codec::{Codec, Decode, KeyedVec, Input}; #[macro_use] pub mod generator; -// TODO: consider using blake256 to avoid possible preimage attack. - struct IncrementalInput<'a> { key: &'a [u8], pos: usize, diff --git a/substrate/srml/system/src/lib.rs b/substrate/srml/system/src/lib.rs index 93ccd85c67..c47c970a98 100644 --- a/substrate/srml/system/src/lib.rs +++ b/substrate/srml/system/src/lib.rs @@ -344,9 +344,9 @@ impl Module { #[cfg(any(feature = "std", test))] pub fn externalities() -> TestExternalities { TestExternalities::new(map![ - twox_128(&>::key_for(T::BlockNumber::zero())).to_vec() => [69u8; 32].encode(), // TODO: replace with Hash::default().encode + twox_128(&>::key_for(T::BlockNumber::zero())).to_vec() => [69u8; 32].encode(), twox_128(>::key()).to_vec() => T::BlockNumber::one().encode(), - twox_128(>::key()).to_vec() => [69u8; 32].encode(), // TODO: replace with Hash::default().encode + twox_128(>::key()).to_vec() => [69u8; 32].encode(), twox_128(>::key()).to_vec() => T::Hash::default().encode() ]) }