Merge remote-tracking branch 'origin/trunk' into parse-to-edit-ast

Folkert 2020-12-16 00:29:27 +01:00
commit 421e652edb
29 changed files with 332 additions and 457 deletions


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod link;
 pub mod program;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod bitcode;
 pub mod std;


@@ -83,6 +83,7 @@ enum PendingDef<'a> {
     InvalidAlias,
 }
 
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #[derive(Clone, Debug, PartialEq)]
 #[allow(clippy::large_enum_variant)]
 pub enum Declaration {


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod annotation;
 pub mod builtins;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod all;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod builtins;
 pub mod expr;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod annotation;
 pub mod def;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod llvm;


@@ -3064,7 +3064,14 @@ fn run_low_level<'a, 'ctx, 'env>(
             let wrapper_struct = wrapper_struct.into_struct_value();
             let elem_index = load_symbol(env, scope, &args[1]).into_int_value();
 
-            list_get_unsafe(env, list_layout, elem_index, wrapper_struct)
+            list_get_unsafe(
+                env,
+                layout_ids,
+                parent,
+                list_layout,
+                elem_index,
+                wrapper_struct,
+            )
         }
         ListSetInPlace => {
             let (list_symbol, list_layout) = load_symbol_and_layout(env, scope, &args[0]);


@@ -553,6 +553,8 @@ pub fn list_reverse<'a, 'ctx, 'env>(
 pub fn list_get_unsafe<'a, 'ctx, 'env>(
     env: &Env<'a, 'ctx, 'env>,
+    layout_ids: &mut LayoutIds<'a>,
+    parent: FunctionValue<'ctx>,
     list_layout: &Layout<'a>,
     elem_index: IntValue<'ctx>,
     wrapper_struct: StructValue<'ctx>,
@@ -572,7 +574,11 @@ pub fn list_get_unsafe<'a, 'ctx, 'env>(
             let elem_ptr =
                 unsafe { builder.build_in_bounds_gep(array_data_ptr, &[elem_index], "elem") };
 
-            builder.build_load(elem_ptr, "List.get")
+            let result = builder.build_load(elem_ptr, "List.get");
+
+            increment_refcount_layout(env, parent, layout_ids, result, elem_layout);
+
+            result
         }
         _ => {
             unreachable!(


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 use bumpalo::{collections::Vec, Bump};


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod docs;
 pub mod effect_module;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod ident;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod borrow;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 #[macro_use]


@@ -1,13 +1,4 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod can;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod all;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod error;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod module;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod boolean_algebra;
 pub mod builtin_aliases;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub mod unify;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data struture memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 pub use roc_can::expr::Expr::*;


@@ -1,14 +1,5 @@
 #![warn(clippy::all, clippy::dbg_macro)]
-// I'm skeptical that clippy:large_enum_variant is a good lint to have globally enabled.
-//
-// It warns about a performance problem where the only quick remediation is
-// to allocate more on the heap, which has lots of tradeoffs - including making it
-// long-term unclear which allocations *need* to happen for compilation's sake
-// (e.g. recursive structures) versus those which were only added to appease clippy.
-//
-// Effectively optimizing data structure memory layout isn't a quick fix,
-// and encouraging shortcuts here creates bad incentives. I would rather temporarily
-// re-enable this when working on performance optimizations than have it block PRs.
+// See github.com/rtfeldman/roc/issues/800 for discussion of the large_enum_variant check.
 #![allow(clippy::large_enum_variant)]
 // Inspired by:
@@ -20,12 +11,10 @@
 use crate::buffer::create_rect_buffers;
 use crate::text::{build_glyph_brush, Text};
 use crate::vertex::Vertex;
-use cgmath::Ortho;
+use ortho::{init_ortho, update_ortho_buffer, OrthoResources};
 use std::error::Error;
 use std::io;
 use std::path::Path;
-use wgpu::util::DeviceExt;
-use wgpu::{BindGroup, BindGroupLayoutDescriptor, BindGroupLayoutEntry, Buffer, ShaderStage};
 use winit::event;
 use winit::event::{Event, ModifiersState};
 use winit::event_loop::ControlFlow;
@@ -35,6 +24,7 @@ mod buffer;
 pub mod expr;
 pub mod file;
 mod keyboard_input;
+mod ortho;
 pub mod pool;
 mod rect;
 pub mod text;
@@ -51,31 +41,6 @@ pub fn launch(_filepaths: &[&Path]) -> io::Result<()> {
     Ok(())
 }
 
-#[repr(C)]
-#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
-struct Uniforms {
-    // We can't use cgmath with bytemuck directly so we'll have
-    // to convert the Matrix4 into a 4x4 f32 array
-    ortho: [[f32; 4]; 4],
-}
-
-impl Uniforms {
-    fn new(w: u32, h: u32) -> Self {
-        let ortho: cgmath::Matrix4<f32> = Ortho::<f32> {
-            left: 0.0,
-            right: w as f32,
-            bottom: h as f32,
-            top: 0.0,
-            near: -1.0,
-            far: 1.0,
-        }
-        .into();
-
-        Self {
-            ortho: ortho.into(),
-        }
-    }
-}
-
 fn run_event_loop() -> Result<(), Box<dyn Error>> {
     env_logger::init();
@@ -133,8 +98,7 @@ fn run_event_loop() -> Result<(), Box<dyn Error>> {
     let mut swap_chain = gpu_device.create_swap_chain(&surface, &swap_chain_descr);
 
-    let (rect_pipeline, ortho_bind_group, ortho_buffer) =
-        make_rect_pipeline(&gpu_device, &swap_chain_descr);
+    let (rect_pipeline, ortho) = make_rect_pipeline(&gpu_device, &swap_chain_descr);
 
     let mut glyph_brush = build_glyph_brush(&gpu_device, render_format)?;
@@ -179,33 +143,13 @@ fn run_event_loop() -> Result<(), Box<dyn Error>> {
                         },
                     );
 
-                    // update orthographic buffer according to new window size
-                    let new_uniforms = Uniforms::new(size.width, size.height);
-
-                    let new_ortho_buffer =
-                        gpu_device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-                            label: Some("Ortho uniform buffer"),
-                            contents: bytemuck::cast_slice(&[new_uniforms]),
-                            usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_SRC,
-                        });
-
-                    // get a command encoder for the current frame
-                    let mut encoder =
-                        gpu_device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
-                            label: Some("Resize"),
-                        });
-
-                    // overwrite the new buffer over the old one
-                    encoder.copy_buffer_to_buffer(
-                        &new_ortho_buffer,
-                        0,
-                        &ortho_buffer,
-                        0,
-                        (std::mem::size_of::<Uniforms>() * vec![new_uniforms].as_slice().len())
-                            as wgpu::BufferAddress,
-                    );
-
-                    cmd_queue.submit(Some(encoder.finish()));
+                    update_ortho_buffer(
+                        size.width,
+                        size.height,
+                        &gpu_device,
+                        &ortho.buffer,
+                        &cmd_queue,
+                    );
                 }
                 //Received Character
                 Event::WindowEvent {
@@ -260,7 +204,7 @@ fn run_event_loop() -> Result<(), Box<dyn Error>> {
                     if rect_buffers.num_rects > 0 {
                         render_pass.set_pipeline(&rect_pipeline);
-                        render_pass.set_bind_group(0, &ortho_bind_group, &[]);
+                        render_pass.set_bind_group(0, &ortho.bind_group, &[]);
                         render_pass.set_vertex_buffer(0, rect_buffers.vertex_buffer.slice(..));
                         render_pass.set_index_buffer(rect_buffers.index_buffer.slice(..));
                         render_pass.draw_indexed(0..rect_buffers.num_rects, 0, 0..1);
@@ -300,41 +244,11 @@ fn run_event_loop() -> Result<(), Box<dyn Error>> {
 fn make_rect_pipeline(
     gpu_device: &wgpu::Device,
     swap_chain_descr: &wgpu::SwapChainDescriptor,
-) -> (wgpu::RenderPipeline, BindGroup, Buffer) {
-    let uniforms = Uniforms::new(swap_chain_descr.width, swap_chain_descr.height);
-
-    // orthographic projection is used to transfrom pixel coords to the coordinate system used by wgpu
-    let ortho_buffer = gpu_device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
-        label: Some("Ortho uniform buffer"),
-        contents: bytemuck::cast_slice(&[uniforms]),
-        usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
-    });
-
-    // bind groups consist of extra resources that are provided to the shaders
-    let ortho_bind_group_layout = gpu_device.create_bind_group_layout(&BindGroupLayoutDescriptor {
-        entries: &[BindGroupLayoutEntry {
-            binding: 0,
-            visibility: ShaderStage::VERTEX,
-            ty: wgpu::BindingType::UniformBuffer {
-                dynamic: false,
-                min_binding_size: None,
-            },
-            count: None,
-        }],
-        label: Some("Ortho bind group layout"),
-    });
-
-    let ortho_bind_group = gpu_device.create_bind_group(&wgpu::BindGroupDescriptor {
-        layout: &ortho_bind_group_layout,
-        entries: &[wgpu::BindGroupEntry {
-            binding: 0,
-            resource: wgpu::BindingResource::Buffer(ortho_buffer.slice(..)),
-        }],
-        label: Some("Ortho bind group"),
-    });
+) -> (wgpu::RenderPipeline, OrthoResources) {
+    let ortho = init_ortho(swap_chain_descr.width, swap_chain_descr.height, gpu_device);
 
     let pipeline_layout = gpu_device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
-        bind_group_layouts: &[&ortho_bind_group_layout],
+        bind_group_layouts: &[&ortho.bind_group_layout],
         push_constant_ranges: &[],
         label: Some("Rectangle pipeline layout"),
     });
@@ -347,7 +261,7 @@ fn make_rect_pipeline(
         wgpu::include_spirv!("shaders/rect.frag.spv"),
     );
 
-    (pipeline, ortho_bind_group, ortho_buffer)
+    (pipeline, ortho)
 }
 
 fn create_render_pipeline(

editor/src/ortho.rs (new file, 116 lines)

@@ -0,0 +1,116 @@
+use cgmath::{Matrix4, Ortho};
+use wgpu::util::DeviceExt;
+use wgpu::{
+    BindGroup, BindGroupLayout, BindGroupLayoutDescriptor, BindGroupLayoutEntry, Buffer,
+    ShaderStage,
+};
+
+// orthographic projection is used to transfrom pixel coords to the coordinate system used by wgpu
+#[repr(C)]
+#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct Uniforms {
+    // We can't use cgmath with bytemuck directly so we'll have
+    // to convert the Matrix4 into a 4x4 f32 array
+    ortho: [[f32; 4]; 4],
+}
+
+impl Uniforms {
+    fn new(w: u32, h: u32) -> Self {
+        let ortho: Matrix4<f32> = Ortho::<f32> {
+            left: 0.0,
+            right: w as f32,
+            bottom: h as f32,
+            top: 0.0,
+            near: -1.0,
+            far: 1.0,
+        }
+        .into();
+
+        Self {
+            ortho: ortho.into(),
+        }
+    }
+}
+
+// update orthographic buffer according to new window size
+pub fn update_ortho_buffer(
+    inner_width: u32,
+    inner_height: u32,
+    gpu_device: &wgpu::Device,
+    ortho_buffer: &Buffer,
+    cmd_queue: &wgpu::Queue,
+) {
+    let new_uniforms = Uniforms::new(inner_width, inner_height);
+
+    let new_ortho_buffer = gpu_device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+        label: Some("Ortho uniform buffer"),
+        contents: bytemuck::cast_slice(&[new_uniforms]),
+        usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_SRC,
+    });
+
+    // get a command encoder for the current frame
+    let mut encoder = gpu_device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
+        label: Some("Resize"),
+    });
+
+    // overwrite the new buffer over the old one
+    encoder.copy_buffer_to_buffer(
+        &new_ortho_buffer,
+        0,
+        ortho_buffer,
+        0,
+        (std::mem::size_of::<Uniforms>() * vec![new_uniforms].as_slice().len())
+            as wgpu::BufferAddress,
+    );
+
+    cmd_queue.submit(Some(encoder.finish()));
+}
+
+pub struct OrthoResources {
+    pub buffer: Buffer,
+    pub bind_group_layout: BindGroupLayout,
+    pub bind_group: BindGroup,
+}
+
+pub fn init_ortho(
+    inner_width: u32,
+    inner_height: u32,
+    gpu_device: &wgpu::Device,
+) -> OrthoResources {
+    let uniforms = Uniforms::new(inner_width, inner_height);
+
+    let ortho_buffer = gpu_device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
+        label: Some("Ortho uniform buffer"),
+        contents: bytemuck::cast_slice(&[uniforms]),
+        usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
+    });
+
+    // bind groups consist of extra resources that are provided to the shaders
+    let ortho_bind_group_layout = gpu_device.create_bind_group_layout(&BindGroupLayoutDescriptor {
+        entries: &[BindGroupLayoutEntry {
+            binding: 0,
+            visibility: ShaderStage::VERTEX,
+            ty: wgpu::BindingType::UniformBuffer {
+                dynamic: false,
+                min_binding_size: None,
+            },
+            count: None,
+        }],
+        label: Some("Ortho bind group layout"),
+    });
+
+    let ortho_bind_group = gpu_device.create_bind_group(&wgpu::BindGroupDescriptor {
+        layout: &ortho_bind_group_layout,
+        entries: &[wgpu::BindGroupEntry {
+            binding: 0,
+            resource: wgpu::BindingResource::Buffer(ortho_buffer.slice(..)),
+        }],
+        label: Some("Ortho bind group"),
+    });
+
+    OrthoResources {
+        buffer: ortho_buffer,
+        bind_group_layout: ortho_bind_group_layout,
+        bind_group: ortho_bind_group,
+    }
+}


@@ -14,10 +14,18 @@ extern "C" {
     fn roc_main_size() -> i64;
 
     #[link_name = "roc__mainForHost_1_Fx_caller"]
-    fn call_Fx(function_pointer: *const u8, closure_data: *const u8, output: *mut u8) -> ();
+    fn call_Fx(
+        flags: &(),
+        function_pointer: *const u8,
+        closure_data: *const u8,
+        output: *mut u8,
+    ) -> ();
 
     #[link_name = "roc__mainForHost_1_Fx_size"]
     fn size_Fx() -> i64;
+
+    #[link_name = "roc__mainForHost_1_Fx_result_size"]
+    fn size_Fx_result() -> i64;
 }
 
 #[no_mangle]
@@ -48,13 +56,14 @@ pub fn roc_fx_getLine() -> RocStr {
 }
 
 unsafe fn call_the_closure(function_pointer: *const u8, closure_data_ptr: *const u8) -> i64 {
-    let size = size_Fx() as usize;
+    let size = size_Fx_result() as usize;
 
     alloca::with_stack_bytes(size, |buffer| {
        let buffer: *mut std::ffi::c_void = buffer;
        let buffer: *mut u8 = buffer as *mut u8;
 
        call_Fx(
+            &(),
            function_pointer,
            closure_data_ptr as *const u8,
            buffer as *mut u8,


@@ -10,8 +10,11 @@ const Allocator = mem.Allocator;
 extern fn roc__mainForHost_1_exposed([*]u8) void;
 extern fn roc__mainForHost_1_size() i64;
-extern fn roc__mainForHost_1_Fx_caller(*const u8, [*]u8, [*]u8) void;
+extern fn roc__mainForHost_1_Fx_caller(*const u8, *const u8, [*]u8, [*]u8) void;
 extern fn roc__mainForHost_1_Fx_size() i64;
+extern fn roc__mainForHost_1_Fx_result_size() i64;
+
+const Unit = extern struct {};
 
 pub export fn main() u8 {
     const stdout = std.io.getStdOut().writer();
@@ -44,7 +47,7 @@ pub export fn main() u8 {
 }
 
 fn call_the_closure(function_pointer: *const u8, closure_data_pointer: [*]u8) void {
-    const size = roc__mainForHost_1_Fx_size();
+    const size = roc__mainForHost_1_Fx_result_size();
     const raw_output = std.heap.c_allocator.alloc(u8, @intCast(usize, size)) catch unreachable;
     var output = @ptrCast([*]u8, raw_output);
@@ -52,7 +55,9 @@ fn call_the_closure(function_pointer: *const u8, closure_data_pointer: [*]u8) void {
         std.heap.c_allocator.free(raw_output);
     }
 
-    roc__mainForHost_1_Fx_caller(function_pointer, closure_data_pointer, output);
+    const flags: u8 = 0;
+
+    roc__mainForHost_1_Fx_caller(&flags, function_pointer, closure_data_pointer, output);
 
     const elements = @ptrCast([*]u64, @alignCast(8, output));


@@ -13,7 +13,7 @@ platform folkertdev/foo
 mainForHost :
     {
-        init : ({} -> { model: I64, cmd : (Cmd.Cmd [ Line Str ]) as Fx }) as Init,
+        init : ({} -> { model: I64 as Model, cmd : (Cmd.Cmd [ Line Str ]) as Fx }) as Init,
         update : ([ Line Str ], I64 -> { model: I64, cmd : Cmd.Cmd [ Line Str ] } ) as Update
     }
 
 mainForHost = main


@@ -6,8 +6,7 @@ use roc_std::RocStr;
 use std::alloc::Layout;
 use std::time::SystemTime;
 
-type Msg = RocStr;
-type Model = i64;
+type Model = *const u8;
 
 extern "C" {
     #[link_name = "roc__mainForHost_1_exposed"]
@@ -32,8 +31,8 @@ extern "C" {
     #[link_name = "roc__mainForHost_1_Update_caller"]
     fn call_Update(
-        msg: &Msg,
-        model: &Model,
+        msg: Msg,
+        model: Model,
         function_pointer: *const u8,
         closure_data: *const u8,
         output: *mut u8,
@@ -84,9 +83,7 @@ pub fn roc_fx_putChar(foo: i64) -> () {
 #[no_mangle]
 pub fn roc_fx_putLine(line: RocStr) -> () {
-    let bytes = line.as_slice();
-    let string = unsafe { std::str::from_utf8_unchecked(bytes) };
-    println!("{}", string);
+    println!("{}", unsafe { line.as_str() });
 
     ()
 }
@@ -104,9 +101,8 @@ pub fn roc_fx_getLine() -> RocStr {
 unsafe fn run_fx(function_pointer: *const u8, closure_data_ptr: *const u8) -> Msg {
     let size = size_Fx_result() as usize;
 
-    alloca::with_stack_bytes(size, |buffer| {
-        let buffer: *mut std::ffi::c_void = buffer;
-        let buffer: *mut u8 = buffer as *mut u8;
+    let layout = Layout::array::<u8>(size).unwrap();
+    let buffer = std::alloc::alloc(layout);
 
     call_Fx(
         function_pointer,
@@ -117,24 +113,82 @@ unsafe fn run_fx(function_pointer: *const u8, closure_data_ptr: *const u8) -> Msg {
     let output = &*(buffer as *mut RocCallResult<()>);
 
     match output.into() {
-        Ok(()) => {
-            let mut bytes = *(buffer.add(8) as *const (u64, u64));
-
-            let msg = std::mem::transmute::<(u64, u64), RocStr>(bytes);
-
-            msg
-        }
+        Ok(()) => Msg { msg: buffer.add(8) },
         Err(e) => panic!("failed with {}", e),
     }
-    })
 }
 
-unsafe fn run_init(function_pointer: *const u8, closure_data_ptr: *const u8) -> (Model, Msg) {
+struct Msg {
+    msg: *mut u8,
+}
+
+impl Msg {
+    unsafe fn alloc(size: usize) -> Self {
+        let size = size_Fx_result() as usize;
+        let layout = Layout::array::<u8>(size).unwrap();
+        let msg = std::alloc::alloc(layout);
+
+        Self { msg }
+    }
+}
+
+impl Drop for Msg {
+    fn drop(&mut self) {
+        unsafe {
+            let size = size_Fx_result() as usize;
+            let layout = Layout::array::<u8>(size).unwrap();
+
+            std::alloc::dealloc(self.msg.offset(-8), layout);
+        }
+    }
+}
+
+struct ModelCmd {
+    buffer: *mut u8,
+    cmd_fn_ptr_ptr: *const u8,
+    cmd_closure_data_ptr: *const u8,
+    model: *const u8,
+}
+
+impl ModelCmd {
+    unsafe fn alloc() -> Self {
+        let size = 8 + size_Fx() as usize + size_Model() as usize;
+        let layout = Layout::array::<u8>(size).unwrap();
+        let buffer = std::alloc::alloc(layout);
+
+        let cmd_fn_ptr_ptr = buffer.add(8);
+        let cmd_closure_data_ptr = buffer.add(8 + 8);
+        let model = buffer.add(8 + size_Fx() as usize);
+
+        Self {
+            buffer,
+            cmd_fn_ptr_ptr,
+            cmd_closure_data_ptr,
+            model,
+        }
+    }
+}
+
+impl Drop for ModelCmd {
+    fn drop(&mut self) {
+        unsafe {
+            let size = 8 + size_Fx() as usize + size_Model() as usize;
+            let layout = Layout::array::<u8>(size).unwrap();
+
+            std::alloc::dealloc(self.buffer, layout);
+        }
+    }
+}
+
+unsafe fn run_init(
+    function_pointer: *const u8,
+    closure_data_ptr: *const u8,
+) -> Result<ModelCmd, String> {
+    debug_assert_eq!(size_Init_result(), 8 + size_Fx() + size_Model());
     let size = size_Init_result() as usize;
 
-    alloca::with_stack_bytes(size, |buffer| {
-        let buffer: *mut std::ffi::c_void = buffer;
-        let buffer: *mut u8 = buffer as *mut u8;
+    let model_cmd = ModelCmd::alloc();
+    let buffer = model_cmd.buffer;
 
     call_Init(function_pointer, 0 as *const u8, buffer as *mut u8);
@@ -142,42 +196,26 @@ unsafe fn run_init(function_pointer: *const u8, closure_data_ptr: *const u8) -> (Model, Msg) {
     let output = &*(buffer as *mut RocCallResult<()>);
 
     match output.into() {
-        Ok(_) => {
-            let offset = 8 + size_Fx();
-            let model_ptr = buffer.add(offset as usize);
-            let model: i64 = *(model_ptr as *const i64);
-
-            let cmd_fn_ptr_ptr = buffer.add(8) as *const i64;
-            let cmd_fn_ptr = (*cmd_fn_ptr_ptr) as *const u8;
-            let cmd_closure_data_ptr = buffer.add(16);
-
-            let msg = run_fx(cmd_fn_ptr, cmd_closure_data_ptr);
-
-            (model, msg)
-        }
-        Err(e) => panic!("failed with {}", e),
-    }
-    })
+        Ok(_) => Ok(model_cmd),
+        Err(e) => Err(e.to_string()),
+    }
 }
 
 unsafe fn run_update(
-    msg: RocStr,
+    msg: Msg,
     model: Model,
     function_pointer: *const u8,
     closure_data_ptr: *const u8,
-) -> (Model, Msg) {
+) -> Result<ModelCmd, String> {
+    debug_assert_eq!(size_Update_result(), 8 + size_Fx() + size_Model());
     let size = size_Update_result() as usize;
 
-    alloca::with_stack_bytes(size, |buffer| {
-        let buffer: *mut std::ffi::c_void = buffer;
-        let buffer: *mut u8 = buffer as *mut u8;
-
-    println!("let's try update!");
+    let model_cmd = ModelCmd::alloc();
+    let buffer = model_cmd.buffer;
 
     call_Update(
-        &msg,
-        &model,
+        msg,
+        model,
         function_pointer,
         closure_data_ptr,
         buffer as *mut u8,
@@ -187,32 +225,15 @@ unsafe fn run_update(
     let output = &*(buffer as *mut RocCallResult<()>);
 
     match output.into() {
-        Ok(_) => {
-            let offset = 8 + size_Fx();
-            let model_ptr = buffer.add(offset as usize);
-            let model: i64 = *(model_ptr as *const i64);
-
-            let cmd_fn_ptr_ptr = buffer.add(8) as *const i64;
-            let cmd_fn_ptr = (*cmd_fn_ptr_ptr) as *const u8;
-            let cmd_closure_data_ptr = buffer.add(16);
-
-            let msg = run_fx(cmd_fn_ptr, cmd_closure_data_ptr);
-
-            (model, msg)
-        }
-        Err(e) => panic!("failed with {}", e),
-    }
-    })
-}
-
-#[no_mangle]
-pub fn rust_main() -> isize {
-    let start_time = SystemTime::now();
+        Ok(_) => Ok(model_cmd),
+        Err(e) => Err(e.to_string()),
+    }
+}
 
+fn run_roc() -> Result<(), String> {
     let size = unsafe { roc_main_size() } as usize;
     let layout = Layout::array::<u8>(size).unwrap();
 
-    let answer = unsafe {
+    unsafe {
         let buffer = std::alloc::alloc(layout);
 
         roc_main(buffer);
@@ -224,98 +245,59 @@ pub fn rust_main() -> isize {
                 //let closure_data_ptr = buffer.offset(16);
                 let closure_data_ptr = 0 as *const u8;
 
-                let (mut model, mut msg) =
-                    run_init(init_fn_ptr as *const u8, closure_data_ptr as *const u8);
+                let model_cmd =
+                    &mut run_init(init_fn_ptr as *const u8, closure_data_ptr as *const u8).unwrap();
 
                 for _ in 0..5 {
-                    let result = run_update(
+                    let model = model_cmd.model;
+                    let cmd_fn_ptr = *(model_cmd.cmd_fn_ptr_ptr as *const usize) as *const u8;
+                    let msg = run_fx(cmd_fn_ptr, model_cmd.cmd_closure_data_ptr);
+
+                    let mut result = run_update(
                         msg,
                         model,
                         update_fn_ptr as *const u8,
                         closure_data_ptr as *const u8,
-                    );
+                    )
+                    .unwrap();
 
-                    model = result.0;
-                    msg = result.1;
+                    std::mem::swap(model_cmd, &mut result);
+
+                    // implictly drops `result` and `msg`
                 }
 
                 std::alloc::dealloc(buffer, layout);
 
-                model
+                Ok(())
             }
             Err(msg) => {
                 std::alloc::dealloc(buffer, layout);
 
-                panic!("Roc failed with message: {}", msg);
+                Err(msg.to_string())
             }
         }
-    };
-
-    let end_time = SystemTime::now();
-    let duration = end_time.duration_since(start_time).unwrap();
-
-    println!(
-        "Roc closure took {:.4} ms to compute this answer: {:?}",
-        duration.as_secs_f64() * 1000.0,
-        // truncate the answer, so stdout is not swamped
-        answer
-    );
-
-    // Exit code
-    0
+    }
 }
 
-/*
 #[no_mangle]
-pub fn old_rust_main() -> isize {
-    println!("Running Roc closure");
-
+pub fn rust_main() -> isize {
     let start_time = SystemTime::now();
 
-    let size = unsafe { roc_main_size() } as usize;
-    let layout = Layout::array::<u8>(size).unwrap();
-
-    let answer = unsafe {
-        let buffer = std::alloc::alloc(layout);
-
-        roc_main(buffer);
-
-        let output = &*(buffer as *mut RocCallResult<()>);
-
-        match output.into() {
-            Ok(()) => {
-                let function_pointer = {
-                    // this is a pointer to the location where the function pointer is stored
-                    // we pass just the function pointer
-                    let temp = buffer.offset(8) as *const i64;
-
-                    (*temp) as *const u8
-                };
-
-                let closure_data_ptr = buffer.offset(16);
-
-                let result =
-                    call_the_closure(function_pointer as *const u8, closure_data_ptr as *const u8);
-
-                std::alloc::dealloc(buffer, layout);
-
-                result
-            }
-            Err(msg) => {
-                std::alloc::dealloc(buffer, layout);
-
-                panic!("Roc failed with message: {}", msg);
-            }
-        }
-    };
-
     let end_time = SystemTime::now();
     let duration = end_time.duration_since(start_time).unwrap();
 
-    println!(
-        "Roc closure took {:.4} ms to compute this answer: {:?}",
-        duration.as_secs_f64() * 1000.0,
-        // truncate the answer, so stdout is not swamped
-        answer
-    );
+    match run_roc() {
+        Ok(answer) => {
+            println!(
+                "Roc closure took {:.4} ms to compute this answer: {:?}",
+                duration.as_secs_f64() * 1000.0,
+                answer
+            );
+        }
+        Err(e) => {
+            eprintln!("Roc failed with message {:?}", e);
+        }
+    }
 
     // Exit code
     0
 }
-*/


@@ -390,6 +390,12 @@ impl RocStr {
             unsafe { core::slice::from_raw_parts(self.elements, self.length) }
         }
     }
+
+    #[allow(clippy::missing_safety_doc)]
+    pub unsafe fn as_str(&self) -> &str {
+        let slice = self.as_slice();
+        core::str::from_utf8_unchecked(slice)
+    }
 }
 
 impl From<&str> for RocStr {
impl From<&str> for RocStr { impl From<&str> for RocStr {