Remove the trick of pushing an extra N+1 entry to function_offsets. It's bug-prone.

Brian Carroll 2022-11-17 07:43:26 +00:00
parent 0c81063c68
commit 24e6e8445d
2 changed files with 24 additions and 20 deletions
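
The change replaces an implicit sentinel with an explicit bounds check: instead of `function_offsets` carrying an extra (N+1)th entry marking the end of the last function, the lookup now falls back to the length of the code bytes. A minimal sketch of the two schemes, using illustrative names rather than the module's actual API:

    // Old scheme: offsets.len() == N + 1, with a sentinel "end" entry pushed
    // after parsing. Every body's end is offsets[i + 1], but forgetting the
    // sentinel (or pushing it twice) silently corrupts function boundaries.
    fn body_end_with_sentinel(offsets: &[u32], i: usize) -> usize {
        offsets[i + 1] as usize
    }

    // New scheme: offsets.len() == N. The last body simply runs to the end of
    // the section's bytes, so there is no sentinel to keep in sync.
    fn body_end_without_sentinel(offsets: &[u32], bytes_len: usize, i: usize) -> usize {
        if i < offsets.len() - 1 {
            offsets[i + 1] as usize
        } else {
            bytes_len
        }
    }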


@@ -267,7 +267,7 @@ impl<'a> WasmModule<'a> {
             self.names.function_names[old_index].1 = new_name;
         }
 
-        // Relocate calls from to JS imports
+        // Relocate calls to JS imports
         // This must happen *before* we run dead code elimination on the code section,
         // so that byte offsets in the linking data will still be valid.
         for (new_index, &old_index) in live_import_fns.iter().enumerate() {
@@ -291,7 +291,11 @@ impl<'a> WasmModule<'a> {
         for (i, fn_index) in (fn_index_min..fn_index_max).enumerate() {
             if live_flags[fn_index as usize] {
                 let code_start = self.code.function_offsets[i] as usize;
-                let code_end = self.code.function_offsets[i + 1] as usize;
+                let code_end = if i < self.code.function_offsets.len() - 1 {
+                    self.code.function_offsets[i + 1] as usize
+                } else {
+                    self.code.bytes.len()
+                };
                 buffer.extend_from_slice(&self.code.bytes[code_start..code_end]);
             } else {
                 DUMMY_FUNCTION.serialize(&mut buffer);
@@ -367,7 +371,11 @@ impl<'a> WasmModule<'a> {
             // Find where the function body is
             let offset_index = fn_index - fn_index_min as usize;
             let code_start = self.code.function_offsets[offset_index];
-            let code_end = self.code.function_offsets[offset_index + 1];
+            let code_end = if offset_index < self.code.function_offsets.len() - 1 {
+                self.code.function_offsets[offset_index + 1]
+            } else {
+                self.code.bytes.len() as u32
+            };
 
             // For each call in the body
             for (offset, symbol) in call_offsets_and_symbols.iter() {
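
Both fixed call sites above compute the end of a function body the same way. If more call sites appear, the check could be factored into a small helper; a hedged sketch (this helper is hypothetical, not part of the commit):

    /// Hypothetical helper (not in this commit): slice the i'th function body
    /// out of the code section, given exactly one offset per function.
    fn function_body<'b>(bytes: &'b [u8], function_offsets: &[u32], i: usize) -> &'b [u8] {
        let start = function_offsets[i] as usize;
        let end = if i < function_offsets.len() - 1 {
            function_offsets[i + 1] as usize
        } else {
            bytes.len() // the last function runs to the end of the section bytes
        };
        &bytes[start..end]
    }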


@@ -1195,37 +1195,33 @@ impl<'a> CodeSection<'a> {
             });
         }
 
         *cursor += 1;
-        let section_size = u32::parse((), module_bytes, cursor)?;
+        let section_size = u32::parse((), module_bytes, cursor)? as usize;
         let section_body_start = *cursor;
-        let count = u32::parse((), module_bytes, cursor)?;
-        let function_bodies_start = *cursor;
-        let next_section_start = section_body_start + section_size as usize;
+        let function_count = u32::parse((), module_bytes, cursor)?;
+        let next_section_start = section_body_start + section_size;
 
-        // preloaded_bytes starts at the function count, since that's considered the zero offset in the linker data.
-        // But when we finally write to file, we'll exclude the function count and write our own, including app fns.
-        let mut preloaded_bytes =
-            Vec::with_capacity_in(next_section_start - function_bodies_start, arena);
-        preloaded_bytes.extend_from_slice(&module_bytes[section_body_start..*cursor]);
+        // `bytes` must include the function count for linker offsets to be correct.
+        let mut bytes = Vec::with_capacity_in(section_size + section_size / 2, arena);
+        bytes.extend_from_slice(&module_bytes[section_body_start..*cursor]);
 
-        let mut preloaded_offsets = Vec::with_capacity_in(count as usize, arena);
+        let mut function_offsets = Vec::with_capacity_in(function_count as usize, arena);
 
         // While copying the code bytes, also note where each function starts & ends
         // Later we will use this for dead code elimination
         while *cursor < next_section_start {
             let fn_start = *cursor;
-            preloaded_offsets.push((fn_start - section_body_start) as u32);
+            function_offsets.push((fn_start - section_body_start) as u32);
             let fn_length = u32::parse((), module_bytes, cursor)? as usize;
             *cursor += fn_length;
-            preloaded_bytes.extend_from_slice(&module_bytes[fn_start..*cursor]);
+            bytes.extend_from_slice(&module_bytes[fn_start..*cursor]);
         }
-        preloaded_offsets.push((next_section_start - section_body_start) as u32);
-        debug_assert_eq!(preloaded_offsets.len(), 1 + count as usize);
+        debug_assert_eq!(function_offsets.len(), function_count as usize);
 
         Ok(CodeSection {
-            function_count: count,
-            bytes: preloaded_bytes,
-            function_offsets: preloaded_offsets,
+            function_count,
+            bytes,
+            function_offsets,
             dead_import_dummy_count: 0,
         })
     }
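
With the sentinel gone, the parser's bookkeeping is simpler: exactly `function_count` offsets are recorded, and the final body is bounded by `bytes.len()`. A toy check of that invariant, with hand-built bytes rather than a real Wasm module:

    fn main() {
        // Three length-prefixed function bodies packed after a 1-byte count,
        // mimicking the CodeSection layout (toy data, single-byte lengths).
        let bytes: Vec<u8> = vec![3, 2, 0xAA, 0xBB, 1, 0xCC, 2, 0xDD, 0xEE];
        let function_offsets: Vec<u32> = vec![1, 4, 6]; // one entry per function, no N+1 sentinel
        let function_count = 3;

        // Mirrors the new debug_assert: N offsets for N functions.
        assert_eq!(function_offsets.len(), function_count);

        // The last function's end falls back to bytes.len().
        let last_start = *function_offsets.last().unwrap() as usize;
        assert_eq!(&bytes[last_start..bytes.len()], &[2, 0xDD, 0xEE][..]);
    }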