Split ListLen into ListLenU64 and ListLenUsize

The usize one gets used internally for things like
pattern matches. This is both more efficient (it avoids
unnecessary casts) and less error-prone when, e.g.,
comparing a list's length to its capacity, which is a usize.
This commit is contained in:
Richard Feldman 2024-02-14 20:41:52 -05:00
parent a15cc0589c
commit ada83561e5
No known key found for this signature in database
GPG key ID: F1F21AA5B1D9E43B
17 changed files with 88 additions and 48 deletions

View file

@ -2830,8 +2830,13 @@ impl<
}
}
fn build_list_len(&mut self, dst: &Symbol, list: &Symbol) {
self.storage_manager.list_len(&mut self.buf, dst, list);
/// Load the length of `list` into `dst` as a host-pointer-sized integer.
fn build_list_len_usize(&mut self, dst: &Symbol, list: &Symbol) {
    let buf = &mut self.buf;
    self.storage_manager.list_len_usize(buf, dst, list);
}
/// Load the length of `list` into `dst` as a `u64`.
fn build_list_len_u64(&mut self, dst: &Symbol, list: &Symbol) {
    self.storage_manager
        .list_len_u64(&mut self.buf, dst, list);
}
fn build_list_clone(

View file

@ -694,7 +694,7 @@ impl<
}
// Loads the dst to be the latter 64 bits of a list (its length).
pub fn list_len(&mut self, _buf: &mut Vec<'a, u8>, dst: &Symbol, list: &Symbol) {
pub fn list_len_u64(&mut self, _buf: &mut Vec<'a, u8>, dst: &Symbol, list: &Symbol) {
let owned_data = self.remove_allocation_for_sym(list);
self.allocation_map.insert(*list, Rc::clone(&owned_data));
self.allocation_map.insert(*dst, owned_data);
@ -709,6 +709,11 @@ impl<
);
}
/// In a 64-bit backend, this is the same as list_len_u64
pub fn list_len_usize(&mut self, buf: &mut Vec<'a, u8>, dst: &Symbol, list: &Symbol) {
self.list_len_u64(buf, dst, list)
}
/// Creates a struct on the stack, moving the data in fields into the struct.
pub fn create_struct(
&mut self,