+// Copyright © 2020, Oracle and/or its affiliates.
+//
// Copyright (c) 2019 Intel Corporation. All rights reserved.
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
@@ -36,6 +38,13 @@ use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestUsize};
#[allow(missing_docs)]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::all))]
pub mod bootparam;
+
+#[cfg(feature = "elf")]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+#[allow(missing_docs)]
+#[cfg_attr(feature = "cargo-clippy", allow(clippy::all))]
+pub mod start_info;
+
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
@@ -93,6 +102,12 @@ pub enum Error {
    SeekBzImageHeader,
    /// Unable to seek to bzImage compressed kernel.
    SeekBzImageCompressedKernel,
+    /// Unable to seek to note header.
+    SeekNoteHeader,
+    /// Unable to read note header.
+    ReadNoteHeader,
+    /// Invalid PVH note.
+    InvalidPvhNote,
}

/// A specialized `Result` type for the kernel loader.
@@ -125,6 +140,9 @@ impl error::Error for Error {
            Error::SeekBzImageEnd => "Unable to seek bzImage end",
            Error::SeekBzImageHeader => "Unable to seek bzImage header",
            Error::SeekBzImageCompressedKernel => "Unable to seek bzImage compressed kernel",
+            Error::SeekNoteHeader => "Unable to seek to note header",
+            Error::ReadNoteHeader => "Unable to read note header",
+            Error::InvalidPvhNote => "Invalid PVH note header",
        }
    }
}
@@ -150,6 +168,10 @@ pub struct KernelLoaderResult {
    /// This field is only for bzImage following https://www.kernel.org/doc/Documentation/x86/boot.txt
    /// VMM should make use of it to fill zero page for bzImage direct boot.
    pub setup_header: Option<bootparam::setup_header>,
+    /// This field optionally holds the address of a PVH entry point, indicating that
+    /// the kernel supports the PVH boot protocol as described in:
+    /// https://xenbits.xen.org/docs/unstable/misc/pvh.html
+    pub pvh_entry_addr: Option<GuestAddress>,
}

/// A kernel image loading support must implement the KernelLoader trait.
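The new `pvh_entry_addr` field above is only recorded by the loader; acting on it is left to the VMM. As a rough sketch of the intended use (not part of this patch; `configure_pvh_boot` and `configure_linux_boot` are hypothetical VMM-side helpers), a consumer could branch on the field after loading the kernel:

// Sketch only: selecting a boot protocol from the loader result.
fn select_boot_protocol(loader_result: &KernelLoaderResult) {
    match loader_result.pvh_entry_addr {
        // The kernel carries a XEN_ELFNOTE_PHYS32_ENTRY note: start it via PVH.
        Some(pvh_entry) => configure_pvh_boot(pvh_entry),
        // No PVH note was found: fall back to the existing Linux boot protocol path.
        None => configure_linux_boot(loader_result),
    }
}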
@@ -247,6 +269,10 @@ impl KernelLoader for Elf {
        // Read in each section pointed to by the program headers.
        for phdr in &phdrs {
            if phdr.p_type != elf::PT_LOAD || phdr.p_filesz == 0 {
+                if phdr.p_type == elf::PT_NOTE {
+                    // This segment describes a note; check whether a PVH entry point is encoded.
+                    loader_result.pvh_entry_addr = parse_elf_note(phdr, kernel_image)?;
+                }
                continue;
            }
@@ -280,6 +306,75 @@ impl KernelLoader for Elf {
    }
}

+#[cfg(feature = "elf")]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn parse_elf_note<F>(phdr: &elf::Elf64_Phdr, kernel_image: &mut F) -> Result<Option<GuestAddress>>
+where
+    F: Read + Seek,
+{
+    let n_align = phdr.p_align;
+
+    // Seek to the beginning of the note segment
+    kernel_image
+        .seek(SeekFrom::Start(phdr.p_offset))
+        .map_err(|_| Error::SeekNoteHeader)?;
+
+    // Now that the segment has been found, we must locate an ELF note with the
+    // correct type that encodes the PVH entry point if there is one.
+    let mut nhdr: elf::Elf64_Nhdr = Default::default();
+    let mut read_size: usize = 0;
+
+    while read_size < phdr.p_filesz as usize {
+        unsafe {
+            // read_struct is safe when reading a POD struct.
+            // It can be used and dropped without issue.
+            struct_util::read_struct(kernel_image, &mut nhdr).map_err(|_| Error::ReadNoteHeader)?;
+        }
+        // If the note header found is not the desired one, keep reading until
+        // the end of the segment
+        if nhdr.n_type == elf::XEN_ELFNOTE_PHYS32_ENTRY {
+            break;
+        }
+        // Skip the note header plus the size of its fields (with alignment)
+        read_size += mem::size_of::<elf::Elf64_Nhdr>()
+            + align_up(u64::from(nhdr.n_namesz), n_align)
+            + align_up(u64::from(nhdr.n_descsz), n_align);
+
+        kernel_image
+            .seek(SeekFrom::Start(phdr.p_offset + read_size as u64))
+            .map_err(|_| Error::SeekNoteHeader)?;
+    }
+
+    if read_size >= phdr.p_filesz as usize {
+        return Ok(None); // PVH ELF note not found, nothing else to do.
+    }
+    // Otherwise the correct note type was found.
+    // The note header struct has already been read, so we can seek from the
+    // current position and just skip the name field contents.
+    kernel_image
+        .seek(SeekFrom::Current(
+            align_up(u64::from(nhdr.n_namesz), n_align) as i64,
+        ))
+        .map_err(|_| Error::SeekNoteHeader)?;
+
+    // The PVH entry point is a 32-bit address, so the descriptor field
+    // must be capable of storing all such addresses.
+    if (nhdr.n_descsz as usize) < mem::size_of::<u32>() {
+        return Err(Error::InvalidPvhNote);
+    }
+
+    let mut pvh_addr_bytes = [0; mem::size_of::<u32>()];
+
+    // Read 32-bit address stored in the PVH note descriptor field.
+    kernel_image
+        .read_exact(&mut pvh_addr_bytes)
+        .map_err(|_| Error::ReadNoteHeader)?;
+
+    Ok(Some(GuestAddress(
+        u32::from_le_bytes(pvh_addr_bytes).into(),
+    )))
+}
+
#[cfg(feature = "bzimage")]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
/// Big zImage (bzImage) kernel image support.
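For reference, this is roughly the byte layout that `parse_elf_note` above walks: an `Elf64_Nhdr` (three 32-bit words), then the note name, then the descriptor, with name and descriptor padded to the note alignment. The sketch below is not part of this patch; the "Xen" note name, the 4-byte padding, and the idea of driving it from a `std::io::Cursor` in a unit test are assumptions rather than details taken from the patch:

// Sketch only: hand-assemble the bytes of a PVH ELF note for a test.
fn build_pvh_note_bytes(entry: u32) -> Vec<u8> {
    let mut buf = Vec::new();
    buf.extend_from_slice(&4u32.to_le_bytes()); // n_namesz: "Xen" plus its NUL terminator
    buf.extend_from_slice(&4u32.to_le_bytes()); // n_descsz: one 32-bit entry address
    buf.extend_from_slice(&(elf::XEN_ELFNOTE_PHYS32_ENTRY as u32).to_le_bytes()); // n_type
    buf.extend_from_slice(b"Xen\0"); // name, already a multiple of 4 bytes
    buf.extend_from_slice(&entry.to_le_bytes()); // desc: the PVH entry point address
    buf
}

Wrapped in a `Cursor` and described by a matching PT_NOTE program header, such a buffer would give `parse_elf_note` a segment whose parsed entry address equals `entry`.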
@@ -409,6 +504,23 @@ pub fn load_cmdline<M: GuestMemory>(
    Ok(())
}

+/// Align address upwards. Taken from x86_64 crate:
+/// https://docs.rs/x86_64/latest/x86_64/fn.align_up.html
+///
+/// Returns the smallest x with alignment `align` so that x >= addr. The alignment must be
+/// a power of 2.
+#[cfg(feature = "elf")]
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+fn align_up(addr: u64, align: u64) -> usize {
+    assert!(align.is_power_of_two(), "`align` must be a power of two");
+    let align_mask = align - 1;
+    if addr & align_mask == 0 {
+        addr as usize // already aligned
+    } else {
+        ((addr | align_mask) + 1) as usize
+    }
+}
+
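To make the rounding in `align_up` concrete, a short worked example (again not part of the patch):

// Sketch only: how the mask trick rounds up, e.g. with align = 4 (align_mask = 0b011):
//
//   align_up(8, 4)  -> 8 & 0b011 == 0, already aligned  -> 8
//   align_up(9, 4)  -> (9  | 0b011) + 1 == 11 + 1       -> 12
//   align_up(11, 4) -> (11 | 0b011) + 1 == 11 + 1       -> 12
//
// In parse_elf_note this pads n_namesz and n_descsz to the note alignment, so a
// 4-byte "Xen\0" name needs no padding, while a 5-byte name would be skipped as
// 8 bytes when the note alignment is 4.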
#[cfg(test)]
mod test {
    use super::*;