Merge pull request #14 from avsm/master
Updates to 1.0
djs55 committed Dec 10, 2013
2 parents aca8cdd + 23ac130 commit 7966dcb
Showing 9 changed files with 206 additions and 147 deletions.
4 changes: 4 additions & 0 deletions CHANGES
@@ -1,3 +1,7 @@
+1.0.0 (10-Dec-2013):
+* Add ISC copyrights everywhere.
+* Indent code with `ocp-indent`.
+
 0.5.0 (8-Dec-2013)
 * Block.connect: if we don't recognise the id, default to the
   first available disk.
20 changes: 10 additions & 10 deletions README.md
@@ -1,25 +1,27 @@
-ocaml-xen-block-driver
+mirage-xen-block
 ======================
 
-This library allows an OCaml application to
+This library allows a Mirage OCaml application to
 
-1. read and write blocks from any xen "backend" (server)
-2. service block requests from any xen "frontend" (client)
+1. read and write blocks from any Xen "backend" (server)
+2. service block requests from any Xen "frontend" (client)
 
-This library can be used in both kernelspace (on xen)
-or in userspace (using libraries that come with xen).
+This library can be used in both kernelspace (on Xen)
+or in userspace (using libraries that come with Xen).
 
 This library depends on the
 [shared-memory-ring](https://github.com/mirage/shared-memory-ring)
-library which enables high throughput, low-latency data
-transfers over shared memory on both x86 and ARM architectures.
+library which enables high-throughput, low-latency data
+transfers over shared memory on both x86 and ARM architectures,
+using the standard Xen RPC and event channel semantics.
 
 Example: in Mirage unikernels on xen
 ------------------------------------
 
 The [block_perf](https://github.com/mirage/mirage-skeleton/tree/master/block_perf)
 example shows how a Mirage application can use this library
 to access a virtual disk with very little overhead.
 
 For performance results and discussion, see
 [Unikernels: Library Operating Systems for the Cloud](http://anil.recoil.org/papers/2013-asplos-mirage.pdf)
@@ -31,5 +33,3 @@ in userspace and attaches a virtual disk to an existing xen VM.
 The disk read and write requests are served in userspace through
 one of a set of 'backends' which are selectable on the commandline.
 
-
-
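For a feel of the frontend in use, here is a minimal sketch of a unikernel reading the first sector of a disk. It assumes the Mirage 1.0 `V1_LWT.BLOCK` signature from mirage-types and `Io_page` for page-aligned buffers; the `Main`/`B` module names follow mirage-skeleton convention and are illustrative, not part of this repository.

open Lwt

(* Illustrative only: read the first sector of a block device through
   the BLOCK interface that this frontend implements. Assumes the
   Mirage 1.0 V1_LWT.BLOCK signature; error handling is minimal. *)
module Main (B: V1_LWT.BLOCK) = struct
  let start b =
    lwt info = B.get_info b in
    (* Buffers must be page-aligned; take the first sector's worth *)
    let page = Io_page.(to_cstruct (get 1)) in
    let sector0 = Cstruct.sub page 0 info.B.sector_size in
    B.read b 0L [ sector0 ] >>= function
    | `Error _ -> fail (Failure "read failed")
    | `Ok () -> return () (* sector0 now holds the disk's first sector *)
end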
2 changes: 1 addition & 1 deletion _oasis
@@ -1,6 +1,6 @@
 OASISFormat: 0.3
 Name:        mirage-block-xen
-Version:     0.9.9
+Version:     1.0.0
 Synopsis:    Xen block frontend and backend driver implementation
 Authors:     Jonathan Ludlam, Anil Madhavapeddy, David Scott
 License:     ISC
8 changes: 4 additions & 4 deletions lib/META
@@ -1,6 +1,6 @@
 # OASIS_START
-# DO NOT EDIT (digest: 02c79c5a53813c4e3efca390f0487e99)
-version = "0.9.9"
+# DO NOT EDIT (digest: fe313703bd7172422c71fc63812ff05c)
+version = "1.0.0"
 description = "Xen block frontend and backend driver implementation"
 requires = "cstruct cstruct.syntax"
 archive(byte) = "mirage_block_xen.cma"
@@ -9,7 +9,7 @@ archive(native) = "mirage_block_xen.cmxa"
 archive(native, plugin) = "mirage_block_xen.cmxs"
 exists_if = "mirage_block_xen.cma"
 package "front" (
- version = "0.9.9"
+ version = "1.0.0"
 description = "Xen block frontend and backend driver implementation"
 requires =
 "lwt lwt.syntax cstruct cstruct.syntax mirage-types shared-memory-ring shared-memory-ring.lwt mirage-block-xen"
@@ -21,7 +21,7 @@ package "front" (
 )
 
 package "back" (
- version = "0.9.9"
+ version = "1.0.0"
 description = "Xen block frontend and backend driver implementation"
 requires =
 "lwt lwt.syntax cstruct cstruct.syntax shared-memory-ring shared-memory-ring.lwt mirage-block-xen xenctrl"
118 changes: 64 additions & 54 deletions lib/blkback.ml
@@ -68,81 +68,91 @@ let service_thread t =
  let next_readonly_idx = ref 0 in

  let grants_of_segments = List.map (fun seg -> {
    Gnttab.domid = t.domid;
    ref = Int32.to_int seg.Req.gref;
  }) in

  let is_writable req = match req.Req.op with
    | Some Req.Read -> true (* we need to write into the page *)
    | Some Req.Write -> false (* we read from the guest and write to the backend *)
    | _ -> failwith "Unhandled request type" in

  let maybe_mapv writable = function
    | [] -> None (* nothing to do *)
    | grants ->
      begin match Gnttab.mapv t.xg grants writable with
        | None -> failwith "Failed to map grants" (* TODO: handle this error cleanly *)
        | x -> x
      end in

  (* Prepare to map all grants on the ring: *)
  Ring.Rpc.Back.ack_requests t.ring
    (fun slot ->
       let open Req in
       let req = t.parse_req slot in
       let segs = Array.to_list req.segs in
       if is_writable req then begin
         let grants = grants_of_segments segs in
         writable_grants := !writable_grants @ grants;
         requests := (req, !next_writable_idx) :: !requests;
         next_writable_idx := !next_writable_idx + (List.length grants)
       end else begin
         let grants = grants_of_segments segs in
         readonly_grants := !readonly_grants @ grants;
         requests := (req, !next_readonly_idx) :: !requests;
         next_readonly_idx := !next_readonly_idx + (List.length grants)
       end;
    );
  (* -- at this point the ring slots may be overwritten *)
  let requests = List.rev !requests in
  (* Make one big writable mapping *)
  let writable_mapping = maybe_mapv true !writable_grants in
  let readonly_mapping = maybe_mapv false !readonly_grants in

  let writable_buffer =
    Opt.(default empty (map Gnttab.Local_mapping.to_buf writable_mapping)) in
  let readonly_buffer =
    Opt.(default empty (map Gnttab.Local_mapping.to_buf readonly_mapping)) in

  let _ = (* perform everything else in a background thread *)
    (* Handle each request in order *)
    lwt () = Lwt_list.iter_s
        (fun (request, page_offset) ->
           let buffer =
             if is_writable request then
               writable_buffer else
               readonly_buffer in
           let buffer = Bigarray.Array1.sub buffer
               (page_offset * page_size)
               (Array.length request.Req.segs * page_size) in
           (* TODO: we could coalesce the segments here *)
           let (_, _, threads) = List.fold_left (fun (idx, off, threads) seg ->
               let page = Bigarray.Array1.sub buffer (idx * page_size) page_size in

               let sector = Int64.(add request.Req.sector (of_int off)) in
               let fn = if is_writable request then t.ops.read else t.ops.write in
               let th = fn page sector seg.Req.first_sector seg.Req.last_sector in
               let newoff = off + (seg.Req.last_sector - seg.Req.first_sector + 1) in

               idx + 1, newoff, th :: threads
             ) (0, 0, []) (Array.to_list request.Req.segs) in

           lwt () = Lwt.join threads in
           let open Res in
           let slot = Ring.Rpc.Back.(slot t.ring (next_res_id t.ring)) in
           (* These responses aren't visible until pushed (below) *)
           write_response (request.Req.id, {op=request.Req.op; st=Some OK}) slot;
           return ()
        ) requests in

    (* We must unmap before pushing because the frontend will attempt
       to reclaim the pages (without this you get "g.e. still in use!"
       errors from Linux). *)
    let () = try
        Opt.iter (Gnttab.unmap_exn t.xg) readonly_mapping
      with e -> printf "Failed to unmap: %s\n%!" (Printexc.to_string e) in
    let () = try Opt.iter (Gnttab.unmap_exn t.xg) writable_mapping
      with e -> printf "Failed to unmap: %s\n%!" (Printexc.to_string e) in
    (* Make the responses visible to the frontend *)
    let notify = Ring.Rpc.Back.push_responses_and_check_notify t.ring in
    if notify then Eventchn.notify t.xe t.evtchn;
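The per-segment bookkeeping in the fold above is easy to miss: each segment occupies one granted page, and the absolute sector advances by `last_sector - first_sector + 1` each step. Here is a standalone sketch of just that arithmetic, assuming 512-byte sectors (8 per 4KiB page); the `seg` record is a hypothetical stand-in for `Req.seg`.

(* Standalone illustration of the offset bookkeeping in the fold above.
   [seg] is a hypothetical stand-in for Req.seg: first_sector and
   last_sector index sectors within one granted page. *)
type seg = { first_sector : int; last_sector : int }

let sector_offsets ~start_sector segs =
  (* Returns (page_index, absolute_sector) for each segment, advancing
     the running offset by the number of sectors the segment uses. *)
  let _, _, acc =
    List.fold_left (fun (idx, off, acc) seg ->
        let sector = Int64.(add start_sector (of_int off)) in
        let used = seg.last_sector - seg.first_sector + 1 in
        (idx + 1, off + used, (idx, sector) :: acc))
      (0, 0, []) segs
  in
  List.rev acc

let () =
  (* Two full pages then a half page: expect sectors 100, 108, 116 *)
  sector_offsets ~start_sector:100L
    [ { first_sector = 0; last_sector = 7 };
      { first_sector = 0; last_sector = 7 };
      { first_sector = 0; last_sector = 3 } ]
  |> List.iter (fun (i, s) -> Printf.printf "segment %d -> sector %Ld\n" i s)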
@@ -161,8 +171,8 @@ let init xg xe domid ring_info wait ops =
    | Protocol.Native -> Req.Proto_64.read_request, Req.Proto_64.total_size
  in
  let grants = List.map (fun r ->
      { Gnttab.domid = domid; ref = Int32.to_int r })
      [ ring_info.RingInfo.ref ] in
  match Gnttab.mapv xg grants true with
  | None ->
    failwith "Gnttab.mapv failed"
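A final note on `is_writable` in `service_thread` above: the grant-mapping direction is the reverse of the request name, which trips people up. A self-contained illustration, with polymorphic variants standing in for `Req.op`:

(* A guest *Read* needs its grants mapped writable, because the backend
   writes disk data into the guest's page; a guest *Write* only needs
   the backend to read the page. Variants are stand-ins for Req.op. *)
let must_map_writable = function
  | `Read -> true   (* backend fills the guest page from disk *)
  | `Write -> false (* backend only reads what the guest wrote *)

let () =
  assert (must_map_writable `Read);
  assert (not (must_map_writable `Write))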